Posted to commits@ambari.apache.org by ao...@apache.org on 2014/01/31 20:50:40 UTC

[15/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

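Each component script removed below follows the same resource_management pattern: a Script subclass maps the commands Ambari issues (install/start/stop/status) onto methods, and execute() dispatches on the command name. A minimal sketch of that pattern (the class name and pid-file attribute are illustrative, not part of this commit):

    from resource_management import *

    class ExampleComponent(Script):
      def install(self, env):
        import params
        self.install_packages(env)      # install the component's OS packages
        env.set_params(params)

      def start(self, env):
        import params
        env.set_params(params)          # a real script starts the daemon here

      def stop(self, env):
        import params
        env.set_params(params)          # ...and stops it here

      def status(self, env):
        import status_params
        env.set_params(status_params)
        check_process_status(status_params.example_pid_file)  # illustrative name

    if __name__ == "__main__":
      ExampleComponent().execute()      # dispatch on the command Ambari passes
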
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkForFormat.sh
deleted file mode 100644
index d14091a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkForFormat.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-      echo "ERROR: NameNode directory(s) are non-empty. Will not format the NameNode. List of non-empty NameNode dirs:${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-

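checkForFormat.sh reads its arguments positionally: the HDFS user, the Hadoop conf dir, the marker directory, and finally one or more name directories. hdfs_namenode.py later in this patch stages and invokes it roughly as follows (condensed from its format_namenode(); all values resolve from params.py):

    File('/tmp/checkForFormat.sh',
         content=StaticFile("checkForFormat.sh"),
         mode=0755)
    Execute(format("sh /tmp/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} "
                   "{mark_dir} {dfs_name_dir}"),
            not_if=format("test -d {mark_dir}"),  # skip once the marker dir exists
            path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
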
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkWebUI.py
deleted file mode 100644
index f8e9c1a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkWebUI.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import httplib
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
-  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
-
-  (options, args) = parser.parse_args()
-  
-  hosts = options.hosts.split(',')
-  port = options.port
-
-  for host in hosts:
-    try:
-      conn = httplib.HTTPConnection(host, port)
-      # This could be extended to request a specific URL path rather than "/"
-      conn.request("GET", "/")
-      httpCode = conn.getresponse().status
-      conn.close()
-    except Exception:
-      httpCode = 404
-
-    if httpCode != 200:
-      print "Cannot access WEB UI on: http://" + host + ":" + port
-      exit(1)
-      
-
-if __name__ == "__main__":
-  main()

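service_check.py later in this patch stages this checker and runs it against the JournalNode web UIs; condensed, the call site looks like this (the host list and port come from params):

    File("/tmp/checkWebUI.py", content=StaticFile("checkWebUI.py"))
    Execute(format("su - {smoke_user} -c 'python /tmp/checkWebUI.py "
                   "-m {comma_sep_jn_hosts} -p {journalnode_port}'"),
            logoutput=True, tries=5, try_sleep=3)
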
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/datanode.py
deleted file mode 100644
index 57fdb35..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/datanode.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_datanode import datanode
-
-
-class DataNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    datanode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    datanode(action="stop")
-
-  def configure(self, env):
-    import params
-
-    datanode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.datanode_pid_file)
-
-
-if __name__ == "__main__":
-  DataNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_client.py
deleted file mode 100644
index ec24c7d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_client.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-class HdfsClient(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.config(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-  def config(self, env):
-    import params
-
-    pass
-
-
-if __name__ == "__main__":
-  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_datanode.py
deleted file mode 100644
index f7d9f15..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_datanode.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-def datanode(action=None):
-  import params
-
-  if action == "configure":
-    Directory(params.dfs_domain_socket_dir,
-              recursive=True,
-              mode=0750,
-              owner=params.hdfs_user,
-              group=params.user_group)
-    for data_dir in params.dfs_data_dir.split(","):
-      Directory(data_dir,
-                recursive=True,
-                mode=0755,
-                owner=params.hdfs_user,
-                group=params.user_group)
-
-  if action == "start":
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_datanode_keytab_file,
-      principal=params.dfs_datanode_kerberos_principal
-    )
-  if action == "stop":
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_datanode_keytab_file,
-      principal=params.dfs_datanode_kerberos_principal
-    )

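The start and stop branches above pass identical arguments, so an equivalent, slightly tighter formulation (same behavior, given the service() signature in utils.py below) would be:

    if action in ("start", "stop"):
      service(action=action, name="datanode",
              user=params.hdfs_user,
              create_pid_dir=True,
              create_log_dir=True,
              keytab=params.dfs_datanode_keytab_file,
              principal=params.dfs_datanode_kerberos_principal)
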
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_namenode.py
deleted file mode 100644
index 8b29cc3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_namenode.py
+++ /dev/null
@@ -1,212 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-import urlparse
-
-
-def namenode(action=None, format=True):
-  import params
-  # This directory must exist before any action runs (it is needed by the
-  # manual HA steps for an additional namenode).
-  if action == "configure":
-    create_name_dirs(params.dfs_name_dir)
-
-  if action == "start":
-    if format:
-      format_namenode()
-      pass
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-    service(
-      action="start", name="namenode", user=params.hdfs_user,
-      keytab=params.dfs_namenode_keytab_file,
-      create_pid_dir=True,
-      create_log_dir=True,
-      principal=params.dfs_namenode_kerberos_principal
-    )
-
-    # TODO: extract creating of dirs to different services
-    create_app_directories()
-    create_user_directories()
-
-  if action == "stop":
-    service(
-      action="stop", name="namenode", user=params.hdfs_user,
-      keytab=params.dfs_namenode_keytab_file,
-      principal=params.dfs_namenode_kerberos_principal
-    )
-
-  if action == "decommission":
-    decommission()
-
-def create_name_dirs(directories):
-  import params
-
-  dirs = directories.split(",")
-  Directory(dirs,
-            mode=0755,
-            owner=params.hdfs_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-
-def create_app_directories():
-  import params
-
-  hdfs_directory(name="/tmp",
-                 owner=params.hdfs_user,
-                 mode="777"
-  )
-  #mapred directories
-  if params.has_historyserver:
-    hdfs_directory(name="/mapred",
-                   owner=params.mapred_user
-    )
-    hdfs_directory(name="/mapred/system",
-                   owner=params.hdfs_user
-    )
-    #hbase directories
-  if len(params.hbase_master_hosts) != 0:
-    hdfs_directory(name=params.hbase_hdfs_root_dir,
-                   owner=params.hbase_user
-    )
-    hdfs_directory(name=params.hbase_staging_dir,
-                   owner=params.hbase_user,
-                   mode="711"
-    )
-    #hive directories
-  if len(params.hive_server_host) != 0:
-    hdfs_directory(name=params.hive_apps_whs_dir,
-                   owner=params.hive_user,
-                   mode="777"
-    )
-  if len(params.hcat_server_hosts) != 0:
-    hdfs_directory(name=params.webhcat_apps_dir,
-                   owner=params.webhcat_user,
-                   mode="755"
-    )
-  if len(params.hs_host) != 0:
-    if params.yarn_log_aggregation_enabled:
-      hdfs_directory(name=params.yarn_nm_app_log_dir,
-                     owner=params.yarn_user,
-                     group=params.user_group,
-                     mode="777",
-                     recursive_chmod=True
-      )
-    hdfs_directory(name=params.mapreduce_jobhistory_intermediate_done_dir,
-                   owner=params.mapred_user,
-                   group=params.user_group,
-                   mode="777"
-    )
-
-    hdfs_directory(name=params.mapreduce_jobhistory_done_dir,
-                   owner=params.mapred_user,
-                   group=params.user_group,
-                   mode="1777"
-    )
-
-  if params.has_falcon_host:
-    if params.falcon_store_uri[0:4] == "hdfs":
-      hdfs_directory(name=params.falcon_store_uri,
-                     owner=params.falcon_user,
-                     mode="755"
-      )
-
-def create_user_directories():
-  import params
-
-  hdfs_directory(name=params.smoke_hdfs_user_dir,
-                 owner=params.smoke_user,
-                 mode=params.smoke_hdfs_user_mode
-  )
-
-  if params.has_hive_server_host:
-    hdfs_directory(name=params.hive_hdfs_user_dir,
-                   owner=params.hive_user,
-                   mode=params.hive_hdfs_user_mode
-    )
-
-  if params.has_hcat_server_host:
-    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
-      hdfs_directory(name=params.hcat_hdfs_user_dir,
-                     owner=params.hcat_user,
-                     mode=params.hcat_hdfs_user_mode
-      )
-    hdfs_directory(name=params.webhcat_hdfs_user_dir,
-                   owner=params.webhcat_user,
-                   mode=params.webhcat_hdfs_user_mode
-    )
-
-  if params.has_oozie_server:
-    hdfs_directory(name=params.oozie_hdfs_user_dir,
-                   owner=params.oozie_user,
-                   mode=params.oozie_hdfs_user_mode
-    )
-
-
-def format_namenode(force=None):
-  import params
-
-  mark_dir = params.namenode_formatted_mark_dir
-  dfs_name_dir = params.dfs_name_dir
-  hdfs_user = params.hdfs_user
-  hadoop_conf_dir = params.hadoop_conf_dir
-
-  if not params.dfs_ha_enabled:
-    if force:
-      ExecuteHadoop('namenode -format',
-                    kinit_override=True)
-    else:
-      File('/tmp/checkForFormat.sh',
-           content=StaticFile("checkForFormat.sh"),
-           mode=0755)
-      Execute(format(
-        "sh /tmp/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {mark_dir} "
-        "{dfs_name_dir}"),
-              not_if=format("test -d {mark_dir}"),
-              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
-    Execute(format("mkdir -p {mark_dir}"))
-
-
-def decommission():
-  import params
-
-  hdfs_user = params.hdfs_user
-  conf_dir = params.hadoop_conf_dir
-  user_group = params.user_group
-
-  File(params.exclude_file_path,
-       content=Template("exclude_hosts_list.j2"),
-       owner=hdfs_user,
-       group=user_group
-  )
-
-  ExecuteHadoop('dfsadmin -refreshNodes',
-                user=hdfs_user,
-                conf_dir=conf_dir,
-                kinit_override=True)

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_snamenode.py
deleted file mode 100644
index a943455..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_snamenode.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-
-
-def snamenode(action=None, format=False):
-  import params
-
-  if action == "configure":
-    Directory(params.fs_checkpoint_dir,
-              recursive=True,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group)
-  elif action == "start":
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_secondary_namenode_keytab_file,
-      principal=params.dfs_secondary_namenode_kerberos_principal
-    )
-  elif action == "stop":
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_secondary_namenode_keytab_file,
-      principal=params.dfs_secondary_namenode_kerberos_principal
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/journalnode.py
deleted file mode 100644
index f2134d5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/journalnode.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-class JournalNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    service(
-      action="start", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_journalnode_keytab_file,
-      principal=params.dfs_journalnode_kerberos_principal
-    )
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    service(
-      action="stop", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_journalnode_keytab_file,
-      principal=params.dfs_journalnode_kerberos_principal
-    )
-
-  def configure(self, env):
-    import params
-
-    Directory(params.jn_edits_dir,
-              recursive=True,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.journalnode_pid_file)
-
-
-if __name__ == "__main__":
-  JournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/namenode.py
deleted file mode 100644
index deb01d5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/namenode.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_namenode import namenode
-
-
-class NameNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    # TODO: remove once the config action is implemented
-    self.config(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    namenode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="stop")
-
-  def config(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="configure")
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.namenode_pid_file)
-    pass
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="decommission")
-    pass
-
-if __name__ == "__main__":
-  NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/params.py
deleted file mode 100644
index 99fdc70..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/params.py
+++ /dev/null
@@ -1,193 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-import os
-
-config = Script.get_config()
-
-if System.get_instance().os_type == "oraclelinux":
-  ulimit_cmd = ''
-else:
-  ulimit_cmd = "ulimit -c unlimited; "
-
-#security params
-security_enabled = config['configurations']['global']['security_enabled']
-dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
-dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
-dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.secondary.namenode.keytab.file']
-dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-falcon_user = config['configurations']['global']['falcon_user']
-
-dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
-dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
-dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
-dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
-dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
-dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
-
-#exclude file
-hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
-exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-nm_host = default("/clusterHostInfo/nm_host", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
-falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
-
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_historyserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(nagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_journalnode_hosts = not len(journalnode_hosts)  == 0
-has_zkfc_hosts = not len(zkfc_hosts)  == 0
-has_falcon_host = not len(falcon_host)  == 0
-
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-#users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = status_params.hdfs_user
-
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
-smoke_user_group = "users"
-
-#hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-hadoop_bin = "/usr/lib/hadoop/sbin"
-
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-
-dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
-dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
-
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-
-jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']  # e.g. /grid/0/hdfs/journal
-
-# if stack_version[0] == "2":
-dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
-# else:
-#   dfs_name_dir = default("/configurations/hdfs-site/dfs.name.dir","/tmp/hadoop-hdfs/dfs/name")
-
-namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
-namenode_dirs_stub_filename = "namenode_dirs_created"
-
-hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']  # e.g. /apps/hbase/data
-hbase_staging_dir = "/apps/hbase/staging"
-hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]  # e.g. /apps/hive/warehouse
-webhcat_apps_dir = "/apps/webhcat"
-yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']  # e.g. true
-yarn_nm_app_log_dir = config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']  # e.g. /app-logs
-mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']  # e.g. /app-logs
-mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']  # e.g. /mr-history/done
-
-if has_oozie_server:
-  oozie_hdfs_user_dir = format("/user/{oozie_user}")
-  oozie_hdfs_user_mode = 775
-if has_hcat_server_host:
-  hcat_hdfs_user_dir = format("/user/{hcat_user}")
-  hcat_hdfs_user_mode = 755
-  webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
-  webhcat_hdfs_user_mode = 755
-if has_hive_server_host:
-  hive_hdfs_user_dir = format("/user/{hive_user}")
-  hive_hdfs_user_mode = 700
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 770
-
-namenode_formatted_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
-
-# if stack_version[0] == "2":
-fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']  # e.g. /tmp/hadoop-hdfs/dfs/namesecondary
-# else:
-#   fs_checkpoint_dir = default("/configurations/core-site/fs.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
-
-# if stack_version[0] == "2":
-dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']  # e.g. /tmp/hadoop-hdfs/dfs/data
-# else:
-#   dfs_data_dir = default('/configurations/hdfs-site/dfs.data.dir',"/tmp/hadoop-hdfs/dfs/data")
-
-# HDFS High Availability properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namenode_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-namenode_id = None
-if dfs_ha_enabled:
-  for nn_id in dfs_ha_namenode_ids_list:
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    if hostname in nn_host:
-      namenode_id = nn_id
-
-journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
-if journalnode_address:
-  journalnode_port = journalnode_address.split(":")[1]
-
-falcon_store_uri = config['configurations']['global']['falcon_store_uri']

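Two resource_management helpers carry most of this file: default(path, fallback) returns the fallback when a key is missing from the cluster configuration, while plain dictionary indexing raises KeyError, and format() interpolates {names} from the caller's scope rather than from explicit arguments. A small illustration using lookups that appear in this patch:

    rm_host = default("/clusterHostInfo/rm_host", [])            # [] when absent
    hdfs_user = config['configurations']['global']['hdfs_user']  # KeyError when absent

    # format() picks up oozie_user from the enclosing scope:
    oozie_hdfs_user_dir = format("/user/{oozie_user}")
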
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/service_check.py
deleted file mode 100644
index d27b13a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/service_check.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-class HdfsServiceCheck(Script):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-    unique = get_unique_id_and_date()
-    dir = '/tmp'
-    tmp_file = format("{dir}/{unique}")
-
-    safemode_command = "dfsadmin -safemode get | grep OFF"
-
-    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod -R 777 {dir}")
-    test_dir_exists = format("hadoop fs -test -e {dir}")
-    cleanup_cmd = format("fs -rm {tmp_file}")
-    # Cleanup is chained before the put to handle retries: a retry would
-    # otherwise hit a stale file; the exit code is that of the second command.
-    create_file_cmd = format(
-      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
-    test_cmd = format("fs -test -e {tmp_file}")
-    if params.security_enabled:
-      Execute(format(
-        "su - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
-        "{smoke_user}'"))
-    ExecuteHadoop(safemode_command,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(create_dir_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  not_if=test_dir_exists,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(create_file_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(test_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    if params.has_journalnode_hosts:
-      journalnode_port = params.journalnode_port
-      smoke_test_user = params.smoke_user
-      checkWebUIFileName = "checkWebUI.py"
-      checkWebUIFilePath = format("/tmp/{checkWebUIFileName}")
-      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-      checkWebUICmd = format(
-        "su - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
-        "{comma_sep_jn_hosts} -p {journalnode_port}'")
-      File(checkWebUIFilePath,
-           content=StaticFile(checkWebUIFileName))
-
-      Execute(checkWebUICmd,
-              logoutput=True,
-              try_sleep=3,
-              tries=5
-      )
-
-    if params.is_namenode_master:
-      if params.has_zkfc_hosts:
-        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
-        check_zkfc_process_cmd = format(
-          "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-        Execute(check_zkfc_process_cmd,
-                logoutput=True,
-                try_sleep=3,
-                tries=5
-        )
-
-
-if __name__ == "__main__":
-  HdfsServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/snamenode.py
deleted file mode 100644
index 8f682ec..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/snamenode.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_snamenode import snamenode
-
-
-class SNameNode(Script):
-  def install(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.install_packages(env)
-
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.config(env)
-    snamenode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-    snamenode(action="stop")
-
-  def config(self, env):
-    import params
-
-    env.set_params(params)
-
-    snamenode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.snamenode_pid_file)
-
-
-if __name__ == "__main__":
-  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/status_params.py
deleted file mode 100644
index 4097373..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/status_params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hdfs_user = config['configurations']['global']['hdfs_user']
-hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
-namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
-snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
-journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
-zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")

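These paths follow hadoop-daemon.sh's {pid_dir}/hadoop-{user}-{component}.pid convention; for example, assuming a typical hadoop_pid_dir_prefix of /var/run/hadoop and an hdfs_user of hdfs (both are cluster configuration, so this is only illustrative), the DataNode pid file resolves to:

    /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid
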
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/utils.py
deleted file mode 100644
index 017a47a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/utils.py
+++ /dev/null
@@ -1,138 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def service(action=None, name=None, user=None, create_pid_dir=False,
-            create_log_dir=False, keytab=None, principal=None):
-  import params
-
-  kinit_cmd = "true"
-  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
-  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
-  log_dir = format("{hdfs_log_dir_prefix}/{user}")
-  hadoop_daemon = format(
-    "{ulimit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
-    "{hadoop_bin}/hadoop-daemon.sh")
-  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
-
-  if create_pid_dir:
-    Directory(pid_dir,
-              owner=user,
-              recursive=True)
-  if create_log_dir:
-    Directory(log_dir,
-              owner=user,
-              recursive=True)
-
-  if params.security_enabled:
-    principal_replaced = principal.replace("_HOST", params.hostname)
-    kinit_cmd = format("kinit -kt {keytab} {principal_replaced}")
-
-    if name == "datanode":
-      user = "root"
-      pid_file = format(
-        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
-
-  daemon_cmd = format("{cmd} {action} {name}")
-
-  service_is_up = format(
-    "ls {pid_file} >/dev/null 2>&1 &&"
-    " ps `cat {pid_file}` >/dev/null 2>&1") if action == "start" else None
-
-  Execute(kinit_cmd)
-  Execute(daemon_cmd,
-          user = user,
-          not_if=service_is_up
-  )
-  if action == "stop":
-    File(pid_file,
-         action="delete",
-         ignore_failures=True
-    )
-
-
-def hdfs_directory(name=None, owner=None, group=None,
-                   mode=None, recursive_chown=False, recursive_chmod=False):
-  import params
-
-  dir_exists = format("hadoop fs -ls {name} >/dev/null 2>&1")
-  namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
-
-  stub_dir = params.namenode_dirs_created_stub_dir
-  stub_filename = params.namenode_dirs_stub_filename
-  dir_absent_in_stub = format(
-    "grep -q '^{name}$' {stub_dir}/{stub_filename} > /dev/null 2>&1; test $? -ne 0")
-  record_dir_in_stub = format("echo '{name}' >> {stub_dir}/{stub_filename}")
-  tries = 30
-  try_sleep = 10
-  dfs_check_nn_status_cmd = "true"
-
-  if params.dfs_ha_enabled:
-    namenode_id = params.namenode_id
-    dfs_check_nn_status_cmd = format(
-      "hdfs haadmin -getServiceState {namenode_id} | grep active > /dev/null")
-
-  #if params.stack_version[0] == "2":
-  mkdir_cmd = format("fs -mkdir -p {name}")
-  #else:
-  #  mkdir_cmd = format("fs -mkdir {name}")
-
-  if params.security_enabled:
-    Execute(format("kinit -kt {hdfs_user_keytab} {hdfs_user}"),
-            user = params.hdfs_user)
-  ExecuteHadoop(mkdir_cmd,
-                try_sleep=try_sleep,
-                tries=tries,
-                not_if=format(
-                  "! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
-                  "{dir_exists} && ! {namenode_safe_mode_off}"),
-                only_if=format(
-                  "{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
-                  "! {dir_exists}"),
-                conf_dir=params.hadoop_conf_dir,
-                user=params.hdfs_user
-  )
-  Execute(record_dir_in_stub,
-          user=params.hdfs_user,
-          only_if=format("{dir_absent_in_stub}")
-  )
-
-  recursive = "-R" if recursive_chown else ""
-  perm_cmds = []
-
-  if owner:
-    chown = owner
-    if group:
-      chown = format("{owner}:{group}")
-    perm_cmds.append(format("fs -chown {recursive} {chown} {name}"))
-  if mode:
-    perm_cmds.append(format("fs -chmod {recursive} {mode} {name}"))
-  for cmd in perm_cmds:
-    ExecuteHadoop(cmd,
-                  user=params.hdfs_user,
-                  only_if=format("! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && {namenode_safe_mode_off} && {dir_exists}"),
-                  try_sleep=try_sleep,
-                  tries=tries,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-
-

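hdfs_directory() is the idempotency workhorse of this module: the stub file plus the not_if/only_if guards make repeated runs cheap once a directory has been recorded. Callers in hdfs_namenode.py above use it like this (quoted from create_app_directories()):

    hdfs_directory(name="/tmp",
                   owner=params.hdfs_user,
                   mode="777")
    hdfs_directory(name=params.mapreduce_jobhistory_done_dir,
                   owner=params.mapred_user,
                   group=params.user_group,
                   mode="1777")
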
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/zkfc_slave.py
deleted file mode 100644
index 1f9ba65..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/zkfc_slave.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-class ZkfcSlave(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    service(
-      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    service(
-      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def config(self, env):
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.zkfc_pid_file)
-
-
-if __name__ == "__main__":
-  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index c3af46e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
\ No newline at end of file

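Given a decommission list of, say, host1.example.com and host2.example.com in hdfs_exclude_file (hypothetical hosts), the template renders one hostname per line:

    host1.example.com
    host2.example.com
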
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-exec-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-exec-log4j.xml
deleted file mode 100644
index b0f5268..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-exec-log4j.xml
+++ /dev/null
@@ -1,122 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
-  <property>
-    <name>hive.log.threshold</name>
-    <value>ALL</value>
-  </property>
-  <property>
-    <name>hive.root.logger</name>
-    <value>INFO,FA</value>
-  </property>
-  <property>
-    <name>hive.log.dir</name>
-    <value>/tmp/${user.name}</value>
-  </property>
-  <property>
-    <name>hive.log.file</name>
-    <value>${hive.query.id}.log</value>
-  </property>
-  <property>
-    <name>log4j.rootLogger</name>
-    <value>${hive.root.logger}, EventCounter</value>
-  </property>
-  <property>
-    <name>log4j.threshhold</name>
-    <value>${hive.log.threshold}</value>
-  </property>
-  <property>
-    <name>log4j.appender.FA</name>
-    <value>org.apache.log4j.FileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.FA.File</name>
-    <value>${hive.log.dir}/${hive.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.FA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.FA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.console</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.target</name>
-    <value>System.err</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout.ConversionPattern</name>
-    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.EventCounter</name>
-    <value>org.apache.hadoop.hive.shims.HiveEventCounter</value>
-  </property>
-  <property>
-    <name>log4j.category.DataNucleus</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.Datastore</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.Datastore.Schema</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Datastore</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Plugin</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.MetaData</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Query</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.General</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Enhancer</name>
-    <value>ERROR,FA</value>
-  </property>
-
-</configuration>
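
Each <property> name/value pair in files like the one above is flattened into one line of the generated log4j properties file. A hypothetical sketch of that conversion (the input path is an assumption for illustration):

# Flatten an Ambari configuration XML (name/value property pairs, as above)
# into log4j .properties lines.
import xml.etree.ElementTree as ET

def to_properties(xml_path):
    root = ET.parse(xml_path).getroot()
    lines = []
    for prop in root.findall("property"):
        name = prop.findtext("name")
        value = prop.findtext("value") or ""
        lines.append("%s=%s" % (name, value))
    return "\n".join(lines)

print(to_properties("hive-exec-log4j.xml"))
# e.g. hive.root.logger=INFO,FA
#      log4j.appender.FA=org.apache.log4j.FileAppender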

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-log4j.xml
deleted file mode 100644
index e92c3e8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-log4j.xml
+++ /dev/null
@@ -1,130 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
-  <property>
-    <name>hive.log.threshold</name>
-    <value>ALL</value>
-  </property>
-  <property>
-    <name>hive.root.logger</name>
-    <value>WARN,DRFA</value>
-  </property>
-  <property>
-    <name>hive.log.dir</name>
-    <value>/tmp/${user.name}</value>
-  </property>
-  <property>
-    <name>hive.log.file</name>
-    <value>hive.log</value>
-  </property>
-  <property>
-    <name>log4j.rootLogger</name>
-    <value>${hive.root.logger}, EventCounter</value>
-  </property>
-  <property>
-    <name>log4j.threshold</name>
-    <value>${hive.log.threshold}</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.File</name>
-    <value>${hive.log.dir}/${hive.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.console</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.target</name>
-    <value>System.err</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout.ConversionPattern</name>
-    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.encoding</name>
-    <value>UTF-8</value>
-  </property>
-  <property>
-    <name>log4j.appender.EventCounter</name>
-    <value>org.apache.hadoop.hive.shims.HiveEventCounter</value>
-  </property>
-  <property>
-    <name>log4j.category.DataNucleus</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.Datastore</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.Datastore.Schema</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Datastore</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Plugin</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.MetaData</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Query</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.General</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Enhancer</name>
-    <value>ERROR,DRFA</value>
-  </property>
-
-</configuration>
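
The DRFA appender configured above rolls the log daily per its DatePattern of .yyyy-MM-dd, renaming the current file with the previous day's date. A purely illustrative sketch of the rolled file name:

# Sketch of the name DailyRollingFileAppender produces when it rolls with
# DatePattern ".yyyy-MM-dd"; illustrative only, not Ambari code.
from datetime import date, timedelta

log_file = "hive.log"                       # ${hive.log.file} above
yesterday = date.today() - timedelta(days=1)
rolled = "%s.%s" % (log_file, yesterday.strftime("%Y-%m-%d"))
print(rolled)   # e.g. hive.log.2014-01-30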

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index bfdc8ac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,285 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-
-  <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used for the Hive Metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc</value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value> </value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-     Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-     thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-    string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>Controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In insecure mode, setting this property to true causes the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting is ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>The Hive client authorization manager class name.
-    The user-defined authorization class should implement the interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.</description>
-  </property>
-
-  <property>
-    <name>hive.security.metastore.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
-    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-    <description>Impersonate the connected user. By default HiveServer2 performs the query processing as the user who
-      submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
-      process runs as.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-    <description>Disable HDFS filesystem cache.</description>
-  </property>
-
-  <property>
-    <name>fs.file.impl.disable.cache</name>
-    <value>true</value>
-    <description>Disable local filesystem cache.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
-      is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
-      this parameter as true.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
-    <value>true</value>
-    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
-    of buckets, a sort-merge join can be performed by setting this parameter as true.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization that converts a common
-      join into a map join based on the input file size.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass
-      the criteria for sort-merge join.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Required to enable the conversion of an SMB (sort-merge-bucket) join to a map-join SMB join.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization that converts a common join into a map join based on the input file
-      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than the
-      specified size, the join is directly converted to a map join (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>1000000000</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
-      is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than this size, the join is directly
-      converted to a map join (there is no conditional task). The Hive default is 10MB; this stack sets 1000000000 bytes (roughly 1GB).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>1</value>
-    <description>Reduce deduplication merges two reduce-sink (RS) operators by moving the key/partition/reducer-count settings of the child RS to the parent RS.
-      If the reducer count of the child RS is fixed (order by or forced bucketing) and small, this can produce a very slow, single MapReduce job.
-      The optimization is disabled if the number of reducers is less than the specified value.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
-    <value>true</value>
-    <description>If hive.auto.convert.join is off, this parameter does not take
-      effect. If it is on, and there are map-join jobs followed by a map-reduce
-      job (e.g., a group by), each map-only job is merged with the following
-      map-reduce job.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description>
-      How many values per key of the small table should be cached in memory
-      for the bucket map join.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.vectorized.execution.enabled</name>
-    <value>false</value>
-    <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0)
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication</name>
-    <value>true</value>
-    <description>Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.index.filter</name>
-    <value>true</value>
-    <description>
-    Whether to enable automatic use of indexes
-    </description>
-  </property>
-
-</configuration>
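
Several of the values above (the bare "jdbc" connection URL, the empty Kerberos properties, the blank password) are stack-level placeholders that cluster configuration overrides at deploy time. A hedged sketch of that merge, with the override value invented for illustration:

# The merge helper and override values are assumptions, not Ambari's API.
defaults = {
    "javax.jdo.option.ConnectionURL": "jdbc",
    "javax.jdo.option.ConnectionUserName": "hive",
    "hive.metastore.uris": "thrift://localhost:9083",
}
overrides = {
    "javax.jdo.option.ConnectionURL":
        "jdbc:mysql://db.example.com/hive?createDatabaseIfNotExist=true",
}

effective = dict(defaults)
effective.update(overrides)   # cluster values win over stack defaults
print(effective["javax.jdo.option.ConnectionURL"])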

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml
deleted file mode 100644
index 3e89601..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,158 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HIVE</name>
-      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-      <version>0.12.0.2.1.1</version>
-      <components>
-
-        <component>
-          <name>HIVE_METASTORE</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/hive_metastore.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HIVE_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/hive_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>MYSQL_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/mysql_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HIVE_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hive_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hive</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>mysql-connector-java</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>mysql</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos6</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos5</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>suse</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-client</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>hive-site</config-type>
-        <config-type>hive-log4j</config-type>
-        <config-type>hive-exec-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-
-    <service>
-      <name>HCATALOG</name>
-      <comment>Table &amp; storage management service for Hadoop</comment>
-      <version>0.12.0.2.0.6.0</version>
-      <components>
-        <component>
-          <name>HCAT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hcat_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hcatalog</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-    </service>
-
-  </services>
-</metainfo>
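
The metainfo.xml above binds each component to a Python command script. An illustrative sketch that lists those bindings, assuming only the structure shown in the file:

import xml.etree.ElementTree as ET

root = ET.parse("metainfo.xml").getroot()
for service in root.iter("service"):
    for component in service.iter("component"):
        name = component.findtext("name")
        script = component.findtext("commandScript/script")
        print("%s -> %s" % (name, script))
# e.g. HIVE_METASTORE -> scripts/hive_metastore.py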

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/addMysqlUser.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/addMysqlUser.sh
deleted file mode 100644
index 8d31b91..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/addMysqlUser.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-mysqldservice=$1
-mysqldbuser=$2
-mysqldbpasswd=$3
-mysqldbhost=$4
-myhostname=$(hostname -f)
-
-service $mysqldservice start
-echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
-if [ "$(mysql -u root -e "select user from mysql.user where user='$mysqldbuser' and host='$myhostname'" | grep -c "$mysqldbuser")" = "0" ]; then
-  echo "Adding user $mysqldbuser@$myhostname";
-  mysql -u root -e "CREATE USER '$mysqldbuser'@'$myhostname' IDENTIFIED BY '$mysqldbpasswd';";
-  mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$myhostname';";
-fi
-mysql -u root -e "flush privileges;"
-service $mysqldservice stop
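
The script above expects four positional arguments. A hypothetical invocation from Python, with the script path and credentials made up for illustration:

import subprocess

subprocess.check_call([
    "sh", "/var/lib/ambari-agent/cache/addMysqlUser.sh",  # assumed path
    "mysqld",            # $1 mysqldservice
    "hive",              # $2 mysqldbuser
    "hivepassword",      # $3 mysqldbpasswd
    "db.example.com",    # $4 mysqldbhost
])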

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hcatSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hcatSmoke.sh
deleted file mode 100644
index 9e7b33f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hcatSmoke.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-
-case "$2" in
-
-prepare)
-  hcat -e "show tables"
-  hcat -e "drop table IF EXISTS ${tablename}"
-  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
-;;
-
-cleanup)
-  hcat -e "drop table IF EXISTS ${tablename}"
-;;
-
-esac

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveSmoke.sh
deleted file mode 100644
index 7e03524..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveSmoke.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
-echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2.sql b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2.sql
deleted file mode 100644
index 99a3865..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2.sql
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
-DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2Smoke.sh
deleted file mode 100644
index 051a21e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2Smoke.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smokeout=$(/usr/lib/hive/bin/beeline -u "$1" -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1 | grep Error)
-
-if [ "x$smokeout" == "x" ]; then
-  echo "Smoke test of hiveserver2 passed"
-  exit 0
-else
-  echo "Smoke test of hiveserver2 wasnt passed"
-  echo $smokeout
-  exit 1
-fi
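
The smoke test above treats any beeline output line containing "Error" as a failure. A rough Python equivalent of the same check (the JDBC URL and script path are placeholders):

import subprocess

def hiveserver2_smoke(jdbc_url, sql_file):
    # Mirrors the shell test: run the SQL file through beeline with the
    # fake credentials above and look for "Error" in the combined output.
    result = subprocess.run(
        ["/usr/lib/hive/bin/beeline", "-u", jdbc_url,
         "-n", "fakeuser", "-p", "fakepwd",
         "-d", "org.apache.hive.jdbc.HiveDriver",
         "-e", "!run %s" % sql_file],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
    return "Error" not in result.stdout

if hiveserver2_smoke("jdbc:hive2://localhost:10000", "hiveserver2.sql"):
    print("Smoke test of hiveserver2 passed")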