Posted to commits@ambari.apache.org by jl...@apache.org on 2014/12/17 21:05:35 UTC

[03/37] ambari git commit: AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log
deleted file mode 100644
index 2010c02..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log
+++ /dev/null
@@ -1,29 +0,0 @@
-Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
-Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
-Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
-Jul 28, 2014 5:04:07 PM           2                  0 B             5.40 GB            9.79 GB
-Jul 28, 2014 5:05:14 PM           3                  0 B             5.06 GB            9.79 GB
-Jul 28, 2014 5:05:50 PM           4                  0 B             5.06 GB            9.79 GB
-Jul 28, 2014 5:06:56 PM           5                  0 B             4.81 GB            9.79 GB
-Jul 28, 2014 5:07:33 PM           6                  0 B             4.80 GB            9.79 GB
-Jul 28, 2014 5:09:11 PM           7                  0 B             4.29 GB            9.79 GB
-Jul 28, 2014 5:09:47 PM           8                  0 B             4.29 GB            9.79 GB
-Jul 28, 2014 5:11:24 PM           9                  0 B             3.89 GB            9.79 GB
-Jul 28, 2014 5:12:00 PM          10                  0 B             3.86 GB            9.79 GB
-Jul 28, 2014 5:13:37 PM          11                  0 B             3.23 GB            9.79 GB
-Jul 28, 2014 5:15:13 PM          12                  0 B             2.53 GB            9.79 GB
-Jul 28, 2014 5:15:49 PM          13                  0 B             2.52 GB            9.79 GB
-Jul 28, 2014 5:16:25 PM          14                  0 B             2.51 GB            9.79 GB
-Jul 28, 2014 5:17:01 PM          15                  0 B             2.39 GB            9.79 GB
-Jul 28, 2014 5:17:37 PM          16                  0 B             2.38 GB            9.79 GB
-Jul 28, 2014 5:18:14 PM          17                  0 B             2.31 GB            9.79 GB
-Jul 28, 2014 5:18:50 PM          18                  0 B             2.30 GB            9.79 GB
-Jul 28, 2014 5:19:26 PM          19                  0 B             2.21 GB            9.79 GB
-Jul 28, 2014 5:20:02 PM          20                  0 B             2.10 GB            9.79 GB
-Jul 28, 2014 5:20:38 PM          21                  0 B             2.06 GB            9.79 GB
-Jul 28, 2014 5:22:14 PM          22                  0 B             1.68 GB            9.79 GB
-Jul 28, 2014 5:23:20 PM          23                  0 B             1.00 GB            9.79 GB
-Jul 28, 2014 5:23:56 PM          24                  0 B          1016.16 MB            9.79 GB
-Jul 28, 2014 5:25:33 PM          25                  0 B            30.55 MB            9.79 GB
-The cluster is balanced. Exiting...
-Balancing took 24.858033333333335 minutes

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
deleted file mode 100644
index 0cce48c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import time
-import sys
-from threading import Thread
-
-
-def write_function(path, handle, interval):
-  with open(path) as f:
-      for line in f:
-          handle.write(line)
-          handle.flush()
-          time.sleep(interval)
-          
-thread = Thread(target =  write_function, args = ('balancer.log', sys.stdout, 1.5))
-thread.start()
-
-threaderr = Thread(target =  write_function, args = ('balancer-err.log', sys.stderr, 1.5 * 0.023))
-threaderr.start()
-
-thread.join()  
-
-
-def rebalancer_out():
-  write_function('balancer.log', sys.stdout)
-  
-def rebalancer_err():
-  write_function('balancer-err.log', sys.stdout)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py
deleted file mode 100644
index 4afd4ce..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py
+++ /dev/null
@@ -1,88 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import datanode_upgrade
-from hdfs_datanode import datanode
-from resource_management import *
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
-from hdfs import hdfs
-
-
-class DataNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-
-
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing DataNode Rolling Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
-      Execute(format("hdp-select set hadoop-hdfs-datanode {version}"))
-
-
-  def post_rolling_restart(self, env):
-    Logger.info("Executing DataNode Rolling Upgrade post-restart")
-    import params
-    env.set_params(params)
-
-    # ensure the DataNode has started and rejoined the cluster
-    datanode_upgrade.post_upgrade_check()
-
-
-  def start(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    datanode(action="start")
-
-
-  def stop(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-
-    # pre-upgrade steps shutdown the datanode, so there's no need to call
-    # action=stop
-    if rolling_restart:
-      datanode_upgrade.pre_upgrade_shutdown()
-    else:
-      datanode(action="stop")
-
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hdfs()
-    datanode(action="configure")
-
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.datanode_pid_file)
-
-
-if __name__ == "__main__":
-  DataNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode_upgrade.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode_upgrade.py
deleted file mode 100644
index 88af1f9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode_upgrade.py
+++ /dev/null
@@ -1,114 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.core.resources.system import Execute
-from resource_management.core.shell import call
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.decorator import retry
-
-
-def pre_upgrade_shutdown():
-  """
-  Runs the "shutdownDatanode {ipc_address} upgrade" command to shutdown the
-  DataNode in preparation for an upgrade. This will then periodically check
-  "getDatanodeInfo" to ensure the DataNode has shutdown correctly.
-  This function will obtain the Kerberos ticket if security is enabled.
-  :return:
-  """
-  import params
-
-  Logger.info('DataNode executing "shutdownDatanode" command in preparation for upgrade...')
-  if params.security_enabled:
-    Execute(params.dn_kinit_cmd, user = params.hdfs_user)
-
-  command = format('hdfs dfsadmin -shutdownDatanode {dfs_dn_ipc_address} upgrade')
-  Execute(command, user=params.hdfs_user, tries=1 )
-
-  # verify that the datanode is down
-  _check_datanode_shutdown()
-
-
-def post_upgrade_check():
-  """
-  Verifies that the DataNode has rejoined the cluster. This function will
-  obtain the Kerberos ticket if security is enabled.
-  :return:
-  """
-  import params
-
-  Logger.info("Checking that the DataNode has rejoined the cluster after upgrade...")
-  if params.security_enabled:
-    Execute(params.dn_kinit_cmd,user = params.hdfs_user)
-
-  # verify that the datanode has started and rejoined the HDFS cluster
-  _check_datanode_startup()
-
-
-@retry(times=12, sleep_time=10, err_class=Fail)
-def _check_datanode_shutdown():
-  """
-  Checks that a DataNode is down by running "hdfs dfsamin getDatanodeInfo"
-  several times, pausing in between runs. Once the DataNode stops responding
-  this method will return, otherwise it will raise a Fail(...) and retry
-  automatically.
-  :return:
-  """
-  import params
-
-  command = format('hdfs dfsadmin -getDatanodeInfo {dfs_dn_ipc_address}')
-
-  try:
-    Execute(command, user=params.hdfs_user, tries=1)
-  except:
-    Logger.info("DataNode has successfully shutdown for upgrade.")
-    return
-
-  Logger.info("DataNode has not shutdown.")
-  raise Fail('DataNode has not shutdown.')
-
-
-@retry(times=12, sleep_time=10, err_class=Fail)
-def _check_datanode_startup():
-  """
-  Checks that a DataNode is reported as being alive via the
-  "hdfs dfsadmin -report -live" command. Once the DataNode is found to be
-  alive this method will return, otherwise it will raise a Fail(...) and retry
-  automatically.
-  :return:
-  """
-  import params
-
-  try:
-    # 'su - hdfs -c "hdfs dfsadmin -report -live"'
-    command = 'hdfs dfsadmin -report -live'
-    return_code, hdfs_output = call(command, user=params.hdfs_user)
-  except:
-    raise Fail('Unable to determine if the DataNode has started after upgrade.')
-
-  if return_code == 0:
-    if params.hostname.lower() in hdfs_output.lower():
-      Logger.info("DataNode {0} reports that it has rejoined the cluster.".format(params.hostname))
-      return
-    else:
-      raise Fail("DataNode {0} was not found in the list of live DataNodes".format(params.hostname))
-
-  # return_code is not 0, fail
-  raise Fail("Unable to determine if the DataNode has started after upgrade (result code {0})".format(str(return_code)))

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs.py
deleted file mode 100644
index 25c1067..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs.py
+++ /dev/null
@@ -1,83 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import sys
-import os
-
-
-def hdfs(name=None):
-  import params
-
-  # On some OS this folder could be not exists, so we will create it before pushing there files
-  Directory(params.limits_conf_dir,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-
-  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
-       owner='root',
-       group='root',
-       mode=0644,
-       content=Template("hdfs.conf.j2")
-  )
-
-  if params.security_enabled:
-    tc_mode = 0644
-    tc_owner = "root"
-  else:
-    tc_mode = None
-    tc_owner = params.hdfs_user
-
-  if "hadoop-policy" in params.config['configurations']:
-    XmlConfig("hadoop-policy.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['hadoop-policy'],
-              configuration_attributes=params.config['configuration_attributes']['hadoop-policy'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  XmlConfig("hdfs-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  XmlConfig("core-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['core-site'],
-            configuration_attributes=params.config['configuration_attributes']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  File(os.path.join(params.hadoop_conf_dir, 'slaves'),
-       owner=tc_owner,
-       content=Template("slaves.j2")
-  )
-  
-  if params.lzo_enabled:
-    Package(params.lzo_packages_for_current_host)

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py
deleted file mode 100644
index b9f244a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs import hdfs
-from utils import service
-
-
-class HdfsClient(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-    self.config(env)
-
-  def start(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-
-  def stop(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-  def config(self, env):
-    import params
-    hdfs()
-    pass
-
-
-if __name__ == "__main__":
-  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py
deleted file mode 100644
index d432d88..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions.dfs_datanode_helper import handle_dfs_data_dir
-from utils import service
-
-
-def create_dirs(data_dir, params):
-  """
-  :param data_dir: The directory to create
-  :param params: parameters
-  """
-  Directory(data_dir,
-            recursive=True,
-            recursive_permission=True,
-            mode=0755,
-            owner=params.hdfs_user,
-            group=params.user_group,
-            ignore_failures=True
-  )
-
-
-def datanode(action=None):
-  import params
-  if action == "configure":
-    Directory(params.dfs_domain_socket_dir,
-              recursive=True,
-              mode=0751,
-              owner=params.hdfs_user,
-              group=params.user_group)
-
-    handle_dfs_data_dir(create_dirs, params)
-
-  elif action == "start" or action == "stop":
-    Directory(params.hadoop_pid_dir_prefix,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
deleted file mode 100644
index 9f470a3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
+++ /dev/null
@@ -1,202 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.exceptions import ComponentIsNotRunning
-
-from utils import service, safe_zkfc_op
-
-
-def namenode(action=None, do_format=True, rolling_restart=False, env=None):
-  import params
-  #we need this directory to be present before any action(HA manual steps for
-  #additional namenode)
-  if action == "configure":
-    create_name_dirs(params.dfs_name_dir)
-
-  if action == "start":
-    if do_format:
-      format_namenode()
-      pass
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-    Directory(params.hadoop_pid_dir_prefix,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-    options = "-rollingUpgrade started" if rolling_restart else ""
-
-    service(
-      action="start",
-      name="namenode",
-      user=params.hdfs_user,
-      options=options,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
-
-    if rolling_restart:    
-      # Must start Zookeeper Failover Controller if it exists on this host because it could have been killed in order to initiate the failover.
-      safe_zkfc_op(action, env)
-
-    if params.security_enabled:
-      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
-              user = params.hdfs_user)
-
-    if params.dfs_ha_enabled:
-      dfs_check_nn_status_cmd = as_user(format("hdfs --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active"), params.hdfs_user, env={'PATH':params.hadoop_bin_dir})
-    else:
-      dfs_check_nn_status_cmd = None
-
-    namenode_safe_mode_off = format("hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'")
-
-    # If HA is enabled and it is in standby, then stay in safemode, otherwise, leave safemode.
-    leave_safe_mode = True
-    if dfs_check_nn_status_cmd is not None:
-      code, out = shell.call(dfs_check_nn_status_cmd) # If active NN, code will be 0
-      if code != 0:
-        leave_safe_mode = False
-
-    if leave_safe_mode:
-      # First check if Namenode is not in 'safemode OFF' (equivalent to safemode ON), if so, then leave it
-      code, out = shell.call(namenode_safe_mode_off)
-      if code != 0:
-        leave_safe_mode_cmd = format("hdfs --config {hadoop_conf_dir} dfsadmin -safemode leave")
-        Execute(leave_safe_mode_cmd,
-                user=params.hdfs_user,
-                path=[params.hadoop_bin_dir],
-        )
-
-    # Verify if Namenode should be in safemode OFF
-    Execute(namenode_safe_mode_off,
-            tries=40,
-            try_sleep=10,
-            path=[params.hadoop_bin_dir],
-            user=params.hdfs_user,
-            only_if=dfs_check_nn_status_cmd #skip when HA not active
-    )
-    create_hdfs_directories(dfs_check_nn_status_cmd)
-
-  if action == "stop":
-    service(
-      action="stop", name="namenode", 
-      user=params.hdfs_user
-    )
-
-  if action == "decommission":
-    decommission()
-
-def create_name_dirs(directories):
-  import params
-
-  dirs = directories.split(",")
-  Directory(dirs,
-            mode=0755,
-            owner=params.hdfs_user,
-            group=params.user_group,
-            recursive=True,
-            recursive_permission=True
-  )
-
-
-def create_hdfs_directories(check):
-  import params
-
-  params.HdfsDirectory("/tmp",
-                       action="create_delayed",
-                       owner=params.hdfs_user,
-                       mode=0777
-  )
-  params.HdfsDirectory(params.smoke_hdfs_user_dir,
-                       action="create_delayed",
-                       owner=params.smoke_user,
-                       mode=params.smoke_hdfs_user_mode
-  )
-  params.HdfsDirectory(None, action="create",
-                       only_if=check #skip creation when HA not active
-  )
-
-def format_namenode(force=None):
-  import params
-
-  old_mark_dir = params.namenode_formatted_old_mark_dir
-  mark_dir = params.namenode_formatted_mark_dir
-  dfs_name_dir = params.dfs_name_dir
-  hdfs_user = params.hdfs_user
-  hadoop_conf_dir = params.hadoop_conf_dir
-
-  if not params.dfs_ha_enabled:
-    if force:
-      ExecuteHadoop('namenode -format',
-                    kinit_override=True,
-                    bin_dir=params.hadoop_bin_dir,
-                    conf_dir=hadoop_conf_dir)
-    else:
-      File(format("{tmp_dir}/checkForFormat.sh"),
-           content=StaticFile("checkForFormat.sh"),
-           mode=0755)
-      Execute(format(
-        "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} "
-        "{hadoop_bin_dir} {old_mark_dir} {mark_dir} {dfs_name_dir}"),
-              not_if=format("test -d {old_mark_dir} || test -d {mark_dir}"),
-              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
-      )
-    
-      Directory(mark_dir,
-        recursive = True
-      )
-
-
-def decommission():
-  import params
-
-  hdfs_user = params.hdfs_user
-  conf_dir = params.hadoop_conf_dir
-  user_group = params.user_group
-  nn_kinit_cmd = params.nn_kinit_cmd
-  
-  File(params.exclude_file_path,
-       content=Template("exclude_hosts_list.j2"),
-       owner=hdfs_user,
-       group=user_group
-  )
-  
-  if not params.update_exclude_file_only:
-    Execute(nn_kinit_cmd,
-            user=hdfs_user
-    )
-
-    if params.dfs_ha_enabled:
-      # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
-      # need to execute each command scoped to a particular namenode
-      nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
-    else:
-      nn_refresh_cmd = format('dfsadmin -refreshNodes')
-    ExecuteHadoop(nn_refresh_cmd,
-                  user=hdfs_user,
-                  conf_dir=conf_dir,
-                  kinit_override=True,
-                  bin_dir=params.hadoop_bin_dir)

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_rebalance.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_rebalance.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_rebalance.py
deleted file mode 100644
index 1dc545e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_rebalance.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import re
-
-class HdfsParser():
-  def __init__(self):
-    self.initialLine = None
-    self.state = None
-  
-  def parseLine(self, line):
-    hdfsLine = HdfsLine()
-    type, matcher = hdfsLine.recognizeType(line)
-    if(type == HdfsLine.LineType.HeaderStart):
-      self.state = 'PROCESS_STARTED'
-    elif (type == HdfsLine.LineType.Progress):
-      self.state = 'PROGRESS'
-      hdfsLine.parseProgressLog(line, matcher)
-      if(self.initialLine == None): self.initialLine = hdfsLine
-      
-      return hdfsLine 
-    elif (type == HdfsLine.LineType.ProgressEnd):
-      self.state = 'PROCESS_FINISED'
-    return None
-    
-class HdfsLine():
-  
-  class LineType:
-    HeaderStart, Progress, ProgressEnd, Unknown = range(4)
-  
-  
-  MEMORY_SUFFIX = ['B','KB','MB','GB','TB','PB','EB']
-  MEMORY_PATTERN = '(?P<memmult_%d>(?P<memory_%d>(\d+)(.|,)?(\d+)?) (?P<mult_%d>'+"|".join(MEMORY_SUFFIX)+'))'
-  
-  HEADER_BEGIN_PATTERN = re.compile('Time Stamp\w+Iteration#\w+Bytes Already Moved\w+Bytes Left To Move\w+Bytes Being Moved')
-  PROGRESS_PATTERN = re.compile(
-                            "(?P<date>.*?)\s+" + 
-                            "(?P<iteration>\d+)\s+" + 
-                            MEMORY_PATTERN % (1,1,1) + "\s+" + 
-                            MEMORY_PATTERN % (2,2,2) + "\s+" +
-                            MEMORY_PATTERN % (3,3,3)
-                            )
-  PROGRESS_END_PATTERN = re.compile('(The cluster is balanced. Exiting...|The cluster is balanced. Exiting...)')
-  
-  def __init__(self):
-    self.date = None
-    self.iteration = None
-    self.bytesAlreadyMoved = None 
-    self.bytesLeftToMove = None
-    self.bytesBeingMoved = None 
-    self.bytesAlreadyMovedStr = None 
-    self.bytesLeftToMoveStr = None
-    self.bytesBeingMovedStr = None 
-  
-  def recognizeType(self, line):
-    for (type, pattern) in (
-                            (HdfsLine.LineType.HeaderStart, self.HEADER_BEGIN_PATTERN),
-                            (HdfsLine.LineType.Progress, self.PROGRESS_PATTERN), 
-                            (HdfsLine.LineType.ProgressEnd, self.PROGRESS_END_PATTERN)
-                            ):
-      m = re.match(pattern, line)
-      if m:
-        return type, m
-    return HdfsLine.LineType.Unknown, None
-    
-  def parseProgressLog(self, line, m):
-    '''
-    Parse the line of 'hdfs rebalancer' output. The example output being parsed:
-    
-    Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
-    Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
-    Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
-    
-    Throws AmbariException in case of parsing errors
-
-    '''
-    m = re.match(self.PROGRESS_PATTERN, line)
-    if m:
-      self.date = m.group('date') 
-      self.iteration = int(m.group('iteration'))
-       
-      self.bytesAlreadyMoved = self.parseMemory(m.group('memory_1'), m.group('mult_1')) 
-      self.bytesLeftToMove = self.parseMemory(m.group('memory_2'), m.group('mult_2')) 
-      self.bytesBeingMoved = self.parseMemory(m.group('memory_3'), m.group('mult_3'))
-       
-      self.bytesAlreadyMovedStr = m.group('memmult_1') 
-      self.bytesLeftToMoveStr = m.group('memmult_2')
-      self.bytesBeingMovedStr = m.group('memmult_3') 
-    else:
-      raise AmbariException("Failed to parse line [%s]") 
-  
-  def parseMemory(self, memorySize, multiplier_type):
-    try:
-      factor = self.MEMORY_SUFFIX.index(multiplier_type)
-    except ValueError:
-      raise AmbariException("Failed to memory value [%s %s]" % (memorySize, multiplier_type))
-    
-    return float(memorySize) * (1024 ** factor)
-  def toJson(self):
-    return {
-            'timeStamp' : self.date,
-            'iteration' : self.iteration,
-            
-            'dataMoved': self.bytesAlreadyMovedStr,
-            'dataLeft' : self.bytesLeftToMoveStr,
-            'dataBeingMoved': self.bytesBeingMovedStr,
-            
-            'bytesMoved': self.bytesAlreadyMoved,
-            'bytesLeft' : self.bytesLeftToMove,
-            'bytesBeingMoved': self.bytesBeingMoved,
-          }
-  def __str__(self):
-    return "[ date=%s,iteration=%d, bytesAlreadyMoved=%d, bytesLeftToMove=%d, bytesBeingMoved=%d]"%(self.date, self.iteration, self.bytesAlreadyMoved, self.bytesLeftToMove, self.bytesBeingMoved)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py
deleted file mode 100644
index c650c4d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-
-
-def snamenode(action=None, format=False):
-  import params
-
-  if action == "configure":
-    Directory(params.fs_checkpoint_dir,
-              recursive=True,
-              recursive_permission=True,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group)
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group)
-  elif action == "start" or action == "stop":
-    Directory(params.hadoop_pid_dir_prefix,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py
deleted file mode 100644
index f664bcd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py
+++ /dev/null
@@ -1,90 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
-from resource_management.libraries.functions.format import format
-
-from utils import service
-from hdfs import hdfs
-
-
-class JournalNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
-      Execute(format("hdp-select set hadoop-hdfs-journalnode {version}"))
-
-  def start(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    Directory(params.hadoop_pid_dir_prefix,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    service(
-      action="start", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def stop(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-    service(
-      action="stop", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def configure(self, env):
-    import params
-
-    Directory(params.jn_edits_dir,
-              recursive=True,
-              recursive_permission=True,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    env.set_params(params)
-    hdfs()
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.journalnode_pid_file)
-
-
-if __name__ == "__main__":
-  JournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
deleted file mode 100644
index c8a460f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
+++ /dev/null
@@ -1,162 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-import os
-import json
-import subprocess
-from datetime import datetime
-
-from resource_management import *
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.check_process_status import check_process_status
-
-from hdfs_namenode import namenode
-from hdfs import hdfs
-import hdfs_rebalance
-from utils import failover_namenode
-
-
-class NameNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-    #TODO we need this for HA because of manual steps
-    self.configure(env)
-
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
-      Execute(format("hdp-select set hadoop-hdfs-namenode {version}"))
-
-  def start(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    namenode(action="start", rolling_restart=rolling_restart, env=env)
-
-  def post_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade post-restart")
-    import params
-    env.set_params(params)
-
-    Execute("hdfs dfsadmin -report -live",
-            user=params.hdfs_principal_name if params.security_enabled else params.hdfs_user
-    )
-
-  def stop(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-
-    if rolling_restart and params.dfs_ha_enabled:
-      if params.dfs_ha_automatic_failover_enabled:
-        failover_namenode()
-      else:
-        raise Fail("Rolling Upgrade - dfs.ha.automatic-failover.enabled must be enabled to perform a rolling restart")
-
-    namenode(action="stop", rolling_restart=rolling_restart, env=env)
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    hdfs()
-    namenode(action="configure", env=env)
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.namenode_pid_file)
-    pass
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="decommission")
-    pass
-  
-    
-  def rebalancehdfs(self, env):
-    import params
-    env.set_params(params)
-
-    name_node_parameters = json.loads( params.name_node_params )
-    threshold = name_node_parameters['threshold']
-    _print("Starting balancer with threshold = %s\n" % threshold)
-    
-    def calculateCompletePercent(first, current):
-      return 1.0 - current.bytesLeftToMove/first.bytesLeftToMove
-    
-    
-    def startRebalancingProcess(threshold):
-      rebalanceCommand = format('hdfs --config {hadoop_conf_dir} balancer -threshold {threshold}')
-      return as_user(rebalanceCommand, params.hdfs_user, env={'PATH': params.hadoop_bin_dir})
-    
-    command = startRebalancingProcess(threshold)
-    
-    basedir = os.path.join(env.config.basedir, 'scripts')
-    if(threshold == 'DEBUG'): #FIXME TODO remove this on PROD
-      basedir = os.path.join(env.config.basedir, 'scripts', 'balancer-emulator')
-      command = ['python','hdfs-command.py']
-    
-    _print("Executing command %s\n" % command)
-    
-    parser = hdfs_rebalance.HdfsParser()
-    proc = subprocess.Popen(
-                            command, 
-                            stdout=subprocess.PIPE,
-                            stderr=subprocess.PIPE,
-                            shell=True,
-                            close_fds=True,
-                            cwd=basedir
-                           )
-    for line in iter(proc.stdout.readline, ''):
-      _print('[balancer] %s %s' % (str(datetime.now()), line ))
-      pl = parser.parseLine(line)
-      if pl:
-        res = pl.toJson()
-        res['completePercent'] = calculateCompletePercent(parser.initialLine, pl) 
-        
-        self.put_structured_out(res)
-      elif parser.state == 'PROCESS_FINISED' : 
-        _print('[balancer] %s %s' % (str(datetime.now()), 'Process is finished' ))
-        self.put_structured_out({'completePercent' : 1})
-        break
-    
-    proc.stdout.close()
-    proc.wait()
-    if proc.returncode != None and proc.returncode != 0:
-      raise Fail('Hdfs rebalance process exited with error. See the log output')
-      
-def _print(line):
-  sys.stdout.write(line)
-  sys.stdout.flush()
-
-if __name__ == "__main__":
-  NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
deleted file mode 100644
index 12353de..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
+++ /dev/null
@@ -1,289 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management import *
-import status_params
-import utils
-import os
-import itertools
-import re
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
-
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
-version = default("/commandParams/version", None)
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user = status_params.hdfs_user
-hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-
-# Some datanode settings
-dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
-dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
-dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
-dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
-dfs_dn_ipc_address = config['configurations']['hdfs-site']['dfs.datanode.ipc.address']
-secure_dn_ports_are_in_use = False
-
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-  hadoop_libexec_dir = "/usr/hdp/current/hadoop-client/libexec"
-  hadoop_bin = "/usr/hdp/current/hadoop-client/sbin"    # TODO Rolling Upgrade, switch from hadoop-client to server when starting daemon.
-  hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
-  hadoop_home = "/usr/hdp/current/hadoop-client"
-  if not security_enabled:
-    hadoop_secure_dn_user = '""'
-  else:
-    dfs_dn_port = utils.get_port(dfs_dn_addr)
-    dfs_dn_http_port = utils.get_port(dfs_dn_http_addr)
-    dfs_dn_https_port = utils.get_port(dfs_dn_https_addr)
-    # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
-    if dfs_http_policy == "HTTPS_ONLY":
-      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_https_port)
-    elif dfs_http_policy == "HTTP_AND_HTTPS":
-      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port) or utils.is_secure_port(dfs_dn_https_port)
-    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
-      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port)
-    if secure_dn_ports_are_in_use:
-      hadoop_secure_dn_user = hdfs_user
-    else:
-      hadoop_secure_dn_user = '""'
-else:
-  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-  hadoop_bin = "/usr/lib/hadoop/sbin"
-  hadoop_bin_dir = "/usr/bin"
-  hadoop_home = "/usr/lib/hadoop"
-  hadoop_secure_dn_user = hdfs_user
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-limits_conf_dir = "/etc/security/limits.d"
-
-execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
-ulimit_cmd = "ulimit -c unlimited && "
-
-#security params
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-falcon_user = config['configurations']['falcon-env']['falcon_user']
-
-#exclude file
-hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
-exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
-
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-nm_host = default("/clusterHostInfo/nm_host", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
-falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
-
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_histroryserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_journalnode_hosts = not len(journalnode_hosts)  == 0
-has_zkfc_hosts = not len(zkfc_hosts)  == 0
-has_falcon_host = not len(falcon_host)  == 0
-
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-#users and groups
-yarn_user = config['configurations']['yarn-env']['yarn_user']
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-oozie_user = config['configurations']['oozie-env']['oozie_user']
-webhcat_user = config['configurations']['hive-env']['hcat_user']
-hcat_user = config['configurations']['hive-env']['hcat_user']
-hive_user = config['configurations']['hive-env']['hive_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-mapred_user = config['configurations']['mapred-env']['mapred_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-
-user_group = config['configurations']['cluster-env']['user_group']
-proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
-dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
-
-jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
-
-dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
-
-namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
-namenode_dirs_stub_filename = "namenode_dirs_created"
-
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 0770
-
-namenode_formatted_old_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
-namenode_formatted_mark_dir = format("/var/lib/hdfs/namenode/formatted/")
-
-fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
-
-dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
-dfs_data_dir = ",".join([re.sub(r'^\[.+\]', '', dfs_dir.strip()) for dfs_dir in dfs_data_dir.split(",")])
-
-data_dir_mount_file = config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file']
-
-# HDFS High Availability properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
-
-namenode_id = None
-namenode_rpc = None
-
-if dfs_ha_namenode_ids:
-  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-if dfs_ha_enabled:
-  for nn_id in dfs_ha_namemodes_ids_list:
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    if hostname in nn_host:
-      namenode_id = nn_id
-      namenode_rpc = nn_host
-
-journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
-if journalnode_address:
-  journalnode_port = journalnode_address.split(":")[1]
-  
-  
-if security_enabled:
-  _dn_principal_name = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-  _dn_keytab = config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-  _dn_principal_name = _dn_principal_name.replace('_HOST',hostname.lower())
-  
-  dn_kinit_cmd = format("{kinit_path_local} -kt {_dn_keytab} {_dn_principal_name};")
-  
-  _nn_principal_name = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
-  _nn_keytab = config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
-  _nn_principal_name = _nn_principal_name.replace('_HOST',hostname.lower())
-  
-  nn_kinit_cmd = format("{kinit_path_local} -kt {_nn_keytab} {_nn_principal_name};")  
-else:
-  dn_kinit_cmd = ""
-  nn_kinit_cmd = ""  
-
-import functools
-#create a partial function that pre-binds the arguments common to every HdfsDirectory call;
-#callers create HDFS directories via params.HdfsDirectory (see the usage sketch after this file's diff)
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir=hadoop_conf_dir,
-  hdfs_user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  bin_dir = hadoop_bin_dir
-)
-
-io_compression_codecs = config['configurations']['core-site']['io.compression.codecs']
-lzo_enabled = "com.hadoop.compression.lzo" in io_compression_codecs
-# package-name version suffixes used when the stack is HDP 2.2 or further
-underscored_version = stack_version_unformatted.replace('.', '_')
-dashed_version = stack_version_unformatted.replace('.', '-')
-lzo_packages_to_family = {
-  "any": ["hadoop-lzo"],
-  "redhat": ["lzo", "hadoop-lzo-native"],
-  "suse": ["lzo", "hadoop-lzo-native"],
-  "ubuntu": ["liblzo2-2"]
-}
-
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  lzo_packages_to_family["redhat"] += [format("hadooplzo_{underscored_version}_*")]
-  lzo_packages_to_family["suse"] += [format("hadooplzo_{underscored_version}_*")]
-  lzo_packages_to_family["ubuntu"] += [format("hadooplzo_{dashed_version}_*")]
-
-lzo_packages_for_current_host = lzo_packages_to_family['any'] + lzo_packages_to_family[System.get_instance().os_family]
-all_lzo_packages = set(itertools.chain(*lzo_packages_to_family.values()))
- 
-exclude_packages = []
-if not lzo_enabled:
-  exclude_packages += all_lzo_packages
-  
-name_node_params = default("/commandParams/namenode", None)
-
-#hadoop params
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
-
-#hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
-
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.0') >= 0 and compare_versions(hdp_stack_version, '2.1') < 0 and System.get_instance().os_family != "suse":
-  # deprecated rhel jsvc_path
-  jsvc_path = "/usr/libexec/bigtop-utils"
-else:
-  jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
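For context on the functools.partial defined above, here is a minimal sketch of how a calling stack script would use the pre-bound params.HdfsDirectory resource. The directory paths, owners and modes are illustrative, and the create_delayed/create action pair is assumed from the surrounding stack scripts rather than shown in this diff.

# Hypothetical call site (e.g. a namenode script); only per-directory arguments
# are passed because conf_dir, hdfs_user, keytab, kinit_path_local and bin_dir
# are already bound by the functools.partial above.
import params

params.HdfsDirectory("/tmp",
                     action="create_delayed",
                     owner=params.hdfs_user,
                     mode=0777)
params.HdfsDirectory(params.smoke_hdfs_user_dir,
                     action="create_delayed",
                     owner=params.smoke_user,
                     mode=params.smoke_hdfs_user_mode)
params.HdfsDirectory(None, action="create")   # flush the delayed creations in one pass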

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
deleted file mode 100644
index 3dc3a1b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
+++ /dev/null
@@ -1,119 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-class HdfsServiceCheck(Script):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-    unique = functions.get_unique_id_and_date()
-    dir = '/tmp'
-    tmp_file = format("{dir}/{unique}")
-
-    safemode_command = "dfsadmin -safemode get | grep OFF"
-
-    create_dir_cmd = format("fs -mkdir {dir}")
-    chmod_command = format("fs -chmod 777 {dir}")
-    test_dir_exists = as_user(format("{hadoop_bin_dir}/hadoop --config {hadoop_conf_dir} fs -test -e {dir}"), params.smoke_user)
-    cleanup_cmd = format("fs -rm {tmp_file}")
-    #cleanup is chained in front of the put to handle retries; on a retry there will be a
-    #stale file that needs cleanup, and the exit code is that of the second command
-    create_file_cmd = format(
-      "{cleanup_cmd}; hadoop --config {hadoop_conf_dir} fs -put /etc/passwd {tmp_file}")
-    test_cmd = format("fs -test -e {tmp_file}")
-    if params.security_enabled:
-      Execute(format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_user}"),
-        user=params.smoke_user
-      )
-    ExecuteHadoop(safemode_command,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=20,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    ExecuteHadoop(create_dir_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  not_if=test_dir_exists,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    ExecuteHadoop(chmod_command,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    ExecuteHadoop(create_file_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    ExecuteHadoop(test_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    if params.has_journalnode_hosts:
-      journalnode_port = params.journalnode_port
-      checkWebUIFileName = "checkWebUI.py"
-      checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
-      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-      checkWebUICmd = format("python {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port}")
-      File(checkWebUIFilePath,
-           content=StaticFile(checkWebUIFileName),
-           mode=0775)
-
-      Execute(checkWebUICmd,
-              logoutput=True,
-              try_sleep=3,
-              tries=5,
-              user=params.smoke_user
-      )
-
-    if params.is_namenode_master:
-      if params.has_zkfc_hosts:
-        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
-        check_zkfc_process_cmd = format(
-          "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
-        Execute(check_zkfc_process_cmd,
-                logoutput=True,
-                try_sleep=3,
-                tries=5
-        )
-
-
-if __name__ == "__main__":
-  HdfsServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
deleted file mode 100644
index 7106422..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_snamenode import snamenode
-from hdfs import hdfs
-
-
-class SNameNode(Script):
-
-  def install(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.install_packages(env, params.exclude_packages)
-
-  def pre_rolling_restart(self, env):
-    # The SecondaryNameNode is removed when HA is enabled, and HA is a prerequisite for Rolling Upgrade,
-    # so this component needs no Rolling Restart logic.
-    pass
-
-  def start(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-
-    self.configure(env)
-    snamenode(action="start")
-
-  def stop(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-
-    snamenode(action="stop")
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    hdfs()
-    snamenode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.snamenode_pid_file)
-
-
-if __name__ == "__main__":
-  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
deleted file mode 100644
index 0027a4c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
-namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
-snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
-journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
-zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
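As a rough illustration of how these pid-file paths are consumed by the status commands, the following is a minimal sketch of a pid-file liveness check in the spirit of check_process_status; pid_file_is_live is a hypothetical helper written for this example, not part of resource_management.

import errno
import os

def pid_file_is_live(pid_file):
  """Return True if pid_file exists and names a running process (hedged sketch)."""
  try:
    with open(pid_file) as f:
      pid = int(f.read().strip())
  except (IOError, ValueError):
    return False
  try:
    os.kill(pid, 0)                  # signal 0 only probes for process existence
  except OSError as e:
    return e.errno == errno.EPERM    # process exists but is owned by another user
  return True

# e.g. pid_file_is_live(datanode_pid_file) mirrors a DATANODE status check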

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
deleted file mode 100644
index 6f421b6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
+++ /dev/null
@@ -1,230 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-import re
-
-from resource_management import *
-from resource_management.libraries.functions.format import format
-from resource_management.core.shell import call, checked_call
-from resource_management.core.exceptions import ComponentIsNotRunning
-
-from zkfc_slave import ZkfcSlave
-
-def safe_zkfc_op(action, env):
-  """
-  Idempotent operation on the zkfc process to either start or stop it.
-  :param action: start or stop
-  :param env: environment
-  """
-  zkfc = None
-  if action == "start":
-    try:
-      zkfc = ZkfcSlave()
-      zkfc.status(env)
-    except ComponentIsNotRunning:
-      if zkfc:
-        zkfc.start(env)
-
-  if action == "stop":
-    try:
-      zkfc = ZkfcSlave()
-      zkfc.status(env)
-    except ComponentIsNotRunning:
-      pass
-    else:
-      if zkfc:
-        zkfc.stop(env)
-
-
-def failover_namenode():
-  """
-  Initiate a NameNode failover by killing zkfc on this host, provided this host is currently the active NameNode.
-  """
-  import params
-  check_service_cmd = format("hdfs haadmin -getServiceState {namenode_id}")
-  code, out = call(check_service_cmd, verbose=True, logoutput=True, user=params.hdfs_user)
-
-  state = "unknown"
-  if code == 0 and out:
-    state = "active" if "active" in out else ("standby" if "standby" in out else state)
-    Logger.info("Namenode service state: %s" % state)
-
-  if state == "active":
-    Logger.info("Rolling Upgrade - Initiating namenode failover by killing zkfc on active namenode")
-
-    # Forcefully kill ZKFC on this host to initiate a failover
-    kill_zkfc(params.hdfs_user)
-
-    # Wait until it transitions to standby
-    check_standby_cmd = format("hdfs haadmin -getServiceState {namenode_id} | grep standby")
-    Execute(check_standby_cmd,
-            user=params.hdfs_user,
-            tries=30,
-            try_sleep=6,
-            logoutput=True)
-  else:
-    Logger.info("Rolling Upgrade - Host %s is the standby namenode." % str(params.hostname))
-
-
-def kill_zkfc(zkfc_user):
-  """
-  There are two potential methods for failing over the namenode, especially during a Rolling Upgrade.
-  Option 1. Kill zkfc on the active namenode, provided the standby namenode is up and has zkfc running on it.
-  Option 2. Silent failover (not supported as of HDP 2.2.0.0)
-  :param zkfc_user: User that started the ZKFC process.
-  """
-  import params
-  if params.dfs_ha_enabled:
-    zkfc_pid_file = get_service_pid_file("zkfc", zkfc_user)
-    if zkfc_pid_file:
-      check_process = format("ls {zkfc_pid_file} > /dev/null 2>&1 && ps -p `cat {zkfc_pid_file}` > /dev/null 2>&1")
-      code, out = call(check_process, verbose=True)
-      if code == 0:
-        Logger.debug("ZKFC is running and will be killed to initiate namenode failover.")
-        kill_command = format("{check_process} && kill -9 `cat {zkfc_pid_file}` > /dev/null 2>&1")
-        checked_call(kill_command, verbose=True)
-
-
-def get_service_pid_file(name, user):
-  """
-  Get the pid file path that was used to start the service by the user.
-  :param name: Service name
-  :param user: User that started the service.
-  :return: PID file path
-  """
-  import params
-  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
-  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
-  return pid_file
-
-
-def service(action=None, name=None, user=None, options="", create_pid_dir=False,
-            create_log_dir=False):
-  """
-  :param action: Either "start" or "stop"
-  :param name: Component name, e.g., "namenode", "datanode", "secondarynamenode", "zkfc"
-  :param user: User to run the command as
-  :param options: Additional options to pass to command as a string
-  :param create_pid_dir: Create PID directory
-  :param create_log_dir: Create log file directory
-  """
-  import params
-
-  options = options if options else ""
-  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
-  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
-  log_dir = format("{hdfs_log_dir_prefix}/{user}")
-  check_process = format(
-    "ls {pid_file} >/dev/null 2>&1 &&"
-    " ps -p `cat {pid_file}` >/dev/null 2>&1")
-
-  if create_pid_dir:
-    Directory(pid_dir,
-              owner=user,
-              recursive=True)
-  if create_log_dir:
-    Directory(log_dir,
-              owner=user,
-              recursive=True)
-
-  hadoop_env_exports = {
-    'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
-  }
-
-  if params.security_enabled and name == "datanode":
-    ## The directory where pid files are stored in the secure data environment.
-    hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-    hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
-
-    # From HDP 2.2 (Champlain) onward, the datanode may be started as a non-root user even in a secure cluster
-    if not (params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0) or params.secure_dn_ports_are_in_use:
-      user = "root"
-      pid_file = format(
-        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
-
-    if action == 'stop' and (params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0) and \
-      os.path.isfile(hadoop_secure_dn_pid_file):
-        # Special handling for the case where a secure DataNode previously
-        # started as root is reconfigured to run as a non-root user and then
-        # restarted to pick up the new configs: without falling back to root
-        # here, the still-running root-owned instance could not be stopped.
-        user = "root"
-        
-        try:
-          check_process_status(hadoop_secure_dn_pid_file)
-          
-          custom_export = {
-            'HADOOP_SECURE_DN_USER': params.hdfs_user
-          }
-          hadoop_env_exports.update(custom_export)
-          
-        except ComponentIsNotRunning:
-          pass
-
-  hadoop_daemon = format("{hadoop_bin}/hadoop-daemon.sh")
-
-  if user == "root":
-    cmd = [hadoop_daemon, "--config", params.hadoop_conf_dir, action, name]
-    if options:
-      cmd += [options, ]
-    daemon_cmd = as_sudo(cmd)
-  else:
-    cmd = format("{ulimit_cmd} {hadoop_daemon} --config {hadoop_conf_dir} {action} {name}")
-    if options:
-      cmd += " " + options
-    daemon_cmd = as_user(cmd, user)
-     
-  service_is_up = check_process if action == "start" else None
-  #remove pid file from dead process
-  File(pid_file,
-       action="delete",
-       not_if=check_process
-  )
-  Execute(daemon_cmd,
-          not_if=service_is_up,
-          environment=hadoop_env_exports
-  )
-
-  if action == "stop":
-    File(pid_file,
-         action="delete",
-    )
-
-
-def get_port(address):
-  """
-  Extracts the port from an address like 0.0.0.0:1019
-  """
-  if address is None:
-    return None
-  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
-  if m is not None:
-    return int(m.group(2))
-  else:
-    return None
-
-
-def is_secure_port(port):
-  """
-  Returns True if the port is a privileged (root-only) port on *nix systems; see the combined usage sketch after this file's diff.
-  """
-  if port is not None:
-    return port < 1024
-  else:
-    return False
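To show how get_port and is_secure_port are meant to be combined (for example, to compute the secure_dn_ports_are_in_use flag that service() consults above), here is a small hedged sketch. The addresses are examples and the exact call site computing the flag is not part of this diff.

# Example: decide whether the DataNode listens on privileged ports and therefore
# must be started as root (via jsvc) on a secure cluster. Addresses are examples
# of the usual dfs.datanode.address / dfs.datanode.http.address values.
dfs_dn_addr = "0.0.0.0:1019"
dfs_dn_http_addr = "0.0.0.0:1022"

secure_dn_ports_are_in_use = (
  is_secure_port(get_port(dfs_dn_addr)) or is_secure_port(get_port(dfs_dn_http_addr))
)
# Both ports are < 1024, so the flag is True and service() falls back to user = "root".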

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
deleted file mode 100644
index ee8b418..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions.check_process_status import check_process_status
-
-import utils  # this is needed to avoid a circular dependency since utils.py calls this class
-from hdfs import hdfs
-
-
-class ZkfcSlave(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-
-  def start(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    Directory(params.hadoop_pid_dir_prefix,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    utils.service(
-      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def stop(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-    utils.service(
-      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def configure(self, env):
-    hdfs()
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.zkfc_pid_file)
-
-
-if __name__ == "__main__":
-  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index a92cdc1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/hdfs.conf.j2
deleted file mode 100644
index d58a6f5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/hdfs.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{hdfs_user}}   - nofile 32768
-{{hdfs_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/slaves.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/slaves.j2
deleted file mode 100644
index 4a9e713..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/slaves.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/alerts.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/alerts.json
deleted file mode 100644
index 5344414..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/alerts.json
+++ /dev/null
@@ -1,58 +0,0 @@
-{
-  "ZOOKEEPER": {
-    "service": [
-      {
-        "name": "zookeeper_server_process_percent",
-        "label": "Percent ZooKeeper Servers Available",
-        "description": "This alert is triggered if the number of down ZooKeeper servers in the cluster is greater than the configured critical threshold. It aggregates the results of ZooKeeper process checks.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "zookeeper_server_process",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 0.35
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 0.70
-            }
-          }
-        }
-      }  
-    ],
-    "ZOOKEEPER_SERVER": [
-      {
-        "name": "zookeeper_server_process",
-        "label": "ZooKeeper Server Process",
-        "description": "This host-level alert is triggered if the ZooKeeper server process cannot be determined to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "PORT",
-          "uri": "{{zookeeper-env/clientPort}}",
-          "default_port": 2181,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file
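A short worked example of how the AGGREGATE warning/critical values above are intended to behave. The evaluation logic itself is not part of this file, and whether the comparison is strict or inclusive is an assumption of this sketch.

# Hedged sketch of the affected-ratio check for the aggregate alert above.
def aggregate_state(affected, total, warning=0.35, critical=0.70):
  ratio = float(affected) / total
  if ratio >= critical:
    return "CRITICAL"
  elif ratio >= warning:
    return "WARNING"
  return "OK"

# With 5 ZooKeeper servers and 2 failed zookeeper_server_process checks:
# 2 / 5 = 0.40 exceeds 0.35 but not 0.70, so the aggregate reports WARNING.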