Posted to commits@ambari.apache.org by jl...@apache.org on 2016/03/10 00:50:38 UTC

[46/51] [abbrv] ambari git commit: AMBARI-14451:Parameterize distro-specific stack information for HDFS (Juanjo Marron via dili)

AMBARI-14451:Parameterize distro-specific stack information for HDFS (Juanjo Marron via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4788dc27
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4788dc27
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4788dc27

Branch: refs/heads/AMBARI-13364
Commit: 4788dc27841c9416b4b31b0127ad9874711f7d9a
Parents: e5df414
Author: Di Li <di...@apache.org>
Authored: Mon Feb 29 10:26:13 2016 -0500
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Mar 9 15:15:13 2016 -0800

----------------------------------------------------------------------
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml     |  3 +--
 .../package/alerts/alert_ha_namenode_health.py      |  6 +++---
 .../package/alerts/alert_metrics_deviation.py       |  6 +++---
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py      |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/hdfs_client.py   |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/journalnode.py   |  5 +++--
 .../package/scripts/journalnode_upgrade.py          |  2 +-
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py      |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/nfsgateway.py    |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/params.py        |  1 +
 .../HDFS/2.1.0.2.0/package/scripts/params_linux.py  | 12 ++++++++----
 .../2.1.0.2.0/package/scripts/setup_ranger_hdfs.py  |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/snamenode.py     |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/utils.py         | 16 ++++++++--------
 .../stacks/HDP/2.0.6/configuration/cluster-env.xml  | 15 +++++++++++++++
 15 files changed, 61 insertions(+), 35 deletions(-)
----------------------------------------------------------------------
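
Taken together, the changes below follow one mechanical pattern: every distro-specific literal in the common-services HDFS scripts (the hard-coded "HDP" stack name, the "/usr/hdp" install prefix, and the "2.2"/"2.2.0.0"/"2.3" version gates) is replaced by a parameter sourced from the stack definition. A minimal standalone sketch of the before/after shape; the params class here is a hypothetical stand-in for the params module Ambari generates per command, with illustrative HDP values:

# Sketch only: `params` stands in for Ambari's generated params module.
class params:
    stack_name = "HDP"                    # was a literal in each script
    stack_version_ru_support = "2.2.0.0"  # now read from cluster-env

# Before this patch: the stack name is baked into common-services code.
def get_stack_to_component_before():
    return {"HDP": "hadoop-hdfs-datanode"}

# After this patch: the same lookup driven by stack-supplied parameters,
# so a non-HDP stack can reuse the script unchanged.
def get_stack_to_component_after():
    return {params.stack_name: "hadoop-hdfs-datanode"}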


http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
index 61eccce..61d503f 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
@@ -229,8 +229,7 @@ export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
 #TODO: if env var set that can cause problems
 export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
 
-{# this is different for HDP1 #}
-# Path to jsvc required by secure HDP 2.0 datanode
+# Path to jsvc required by secure datanode
 export JSVC_HOME={{jsvc_path}}
 
 
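The hadoop-env.xml content above is Jinja-style template text that the agent renders with values from params; {{jsvc_path}} is set to /usr/lib/bigtop-utils in params_linux.py later in this patch. A rough illustration of how the cleaned-up snippet renders, using plain jinja2 directly (for demonstration only, not how Ambari invokes its templating):

from jinja2 import Template

snippet = (
    "# Path to jsvc required by secure datanode\n"
    "export JSVC_HOME={{jsvc_path}}\n"
)
# jsvc_path value taken from params_linux.py in this same patch.
print(Template(snippet).render(jsvc_path="/usr/lib/bigtop-utils"))
# -> export JSVC_HOME=/usr/lib/bigtop-utils
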

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
index 70b1970..20d1717 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
@@ -247,7 +247,7 @@ def get_jmx(query, connection_timeout):
 
 def _get_ha_state_from_json(string_json):
   """
-  Searches through the specified JSON string looking for either the HDP 2.0 or 2.1+ HA state
+  Searches through the specified JSON string looking for HA state
   enumerations.
   :param string_json: the string JSON
   :return:  the value of the HA state (active, standby, etc)
@@ -255,7 +255,7 @@ def _get_ha_state_from_json(string_json):
   json_data = json.loads(string_json)
   jmx_beans = json_data["beans"]
 
-  # look for HDP 2.1+ first
+  # look for NameNodeStatus-State first
   for jmx_bean in jmx_beans:
     if "name" not in jmx_bean:
       continue
@@ -264,7 +264,7 @@ def _get_ha_state_from_json(string_json):
     if jmx_bean_name == "Hadoop:service=NameNode,name=NameNodeStatus" and "State" in jmx_bean:
       return jmx_bean["State"]
 
-  # look for HDP 2.0 last
+  # look for FSNamesystem-tag.HAState last
   for jmx_bean in jmx_beans:
     if "name" not in jmx_bean:
       continue

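For reference, a self-contained sketch of the two-pass lookup this function performs, run against a fabricated JMX payload (bean names as in the comments above; a live NameNode exposes one or the other depending on Hadoop version):

import json

# Fabricated payload for illustration; a real NameNode serves this at /jmx.
sample = json.dumps({"beans": [
    {"name": "Hadoop:service=NameNode,name=NameNodeStatus", "State": "active"},
]})

def get_ha_state(string_json):
    jmx_beans = json.loads(string_json)["beans"]
    # First pass: the NameNodeStatus bean's State attribute.
    for bean in jmx_beans:
        if bean.get("name") == "Hadoop:service=NameNode,name=NameNodeStatus" \
                and "State" in bean:
            return bean["State"]
    # Second pass: fall back to FSNamesystem's tag.HAState attribute.
    for bean in jmx_beans:
        if bean.get("name") == "Hadoop:service=NameNode,name=FSNamesystem" \
                and "tag.HAState" in bean:
            return bean["tag.HAState"]

print(get_ha_state(sample))  # -> active
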
http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
index f6a9a56..50a9ecd 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
@@ -338,7 +338,7 @@ def get_jmx(query, connection_timeout):
 
 def _get_ha_state_from_json(string_json):
   """
-  Searches through the specified JSON string looking for either the HDP 2.0 or 2.1+ HA state
+  Searches through the specified JSON string looking for HA state
   enumerations.
   :param string_json: the string JSON
   :return:  the value of the HA state (active, standby, etc)
@@ -346,7 +346,7 @@ def _get_ha_state_from_json(string_json):
   json_data = json.loads(string_json)
   jmx_beans = json_data["beans"]
 
-  # look for HDP 2.1+ first
+  # look for NameNodeStatus-State first
   for jmx_bean in jmx_beans:
     if "name" not in jmx_bean:
       continue
@@ -355,7 +355,7 @@ def _get_ha_state_from_json(string_json):
     if jmx_bean_name == "Hadoop:service=NameNode,name=NameNodeStatus" and "State" in jmx_bean:
       return jmx_bean["State"]
 
-  # look for HDP 2.0 last
+  # look for FSNamesystem-tag.HAState last
   for jmx_bean in jmx_beans:
     if "name" not in jmx_bean:
       continue

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
index 3cdfda9..e3556ff 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
@@ -32,7 +32,8 @@ from utils import get_hdfs_binary
 class DataNode(Script):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-datanode"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-datanode"}
 
   def get_hdfs_binary(self):
     """
@@ -87,7 +88,7 @@ class DataNodeDefault(DataNode):
     Logger.info("Executing DataNode Stack Upgrade pre-restart")
     import params
     env.set_params(params)
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-hdfs-datanode", params.version)
 

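This pre_upgrade_restart guard repeats, with only the component name changed, in hdfs_client.py, journalnode.py, namenode.py, and snamenode.py below. A standalone sketch of the guard with stubbed helpers; Ambari's real compare_versions/format_stack_version live in resource_management.libraries.functions.version, and the stubs here only mimic them:

# Stubs for illustration; they mimic, not reproduce, Ambari's helpers.
def format_stack_version(v):
    return v.split("-")[0]  # drop any build suffix, e.g. "2.3.4.0-3485"

def compare_versions(a, b):
    pa = [int(x) for x in a.split(".")]
    pb = [int(x) for x in b.split(".")]
    return (pa > pb) - (pa < pb)  # -1 / 0 / 1, like the real helper

class params:  # hypothetical command values
    version = "2.3.4.0"
    stack_version_ru_support = "2.2.0.0"  # from cluster-env; was the '2.2.0.0' literal

if params.version and compare_versions(format_stack_version(params.version),
                                        params.stack_version_ru_support) >= 0:
    print("RU-capable stack: run conf_select.select / stack_select.select")
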
http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
index c5ae35e..a56d480 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
@@ -55,12 +55,13 @@ class HdfsClient(Script):
 class HdfsClientDefault(HdfsClient):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-client"}
+    import params
+    return {params.stack_name: "hadoop-client"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-client", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
index 6f26b40..0860211 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
@@ -43,14 +43,15 @@ class JournalNode(Script):
 class JournalNodeDefault(JournalNode):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-journalnode"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-journalnode"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-hdfs-journalnode", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
index d598840..193e7d7 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
@@ -81,7 +81,7 @@ def hdfs_roll_edits():
   """
   import params
 
-  # TODO, this will be to be doc'ed since existing HDP 2.2 clusters will needs HDFS_CLIENT on all JOURNALNODE hosts
+  # TODO: document that existing clusters at stack_version_ru_support will need HDFS_CLIENT on all JOURNALNODE hosts
   dfsadmin_base_command = get_dfsadmin_base_command('hdfs')
   command = dfsadmin_base_command + ' -rollEdits'
   Execute(command, user=params.hdfs_user, tries=1)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 02905ec..2636bbc 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -68,7 +68,8 @@ except ImportError:
 class NameNode(Script):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-namenode"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-namenode"}
 
   def get_hdfs_binary(self):
     """
@@ -190,7 +191,7 @@ class NameNodeDefault(NameNode):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       # When downgrading an Express Upgrade, the first thing we do is to revert the symlinks.
       # Therefore, we cannot call this code in that scenario.
       call_if = [("rolling", "upgrade"), ("rolling", "downgrade"), ("nonrolling", "upgrade")]

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
index c705fca..6386ca5 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
@@ -32,7 +32,8 @@ from resource_management.libraries.functions.version import compare_versions, fo
 class NFSGateway(Script):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-nfs3"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-nfs3"}
 
   def install(self, env):
     import params
@@ -45,7 +46,7 @@ class NFSGateway(Script):
     import params
     env.set_params(params)
 
-    if Script.is_stack_greater_or_equal('2.3.0.0'):
+    if Script.is_stack_greater_or_equal(params.stack_version_nfs_support):
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-hdfs-nfs3", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
index 7514918..0f3746c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
@@ -25,5 +25,6 @@ else:
   from params_linux import *
 
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+stack_version_nfs_support = config['configurations']['cluster-env']['stack_version_nfs_support']
 nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
 retryAble = default("/commandParams/command_retry_enabled", False)

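The new stack_version_nfs_support line follows the same access pattern as nfsgateway_heapsize: a direct nested lookup into the command's configurations. A trimmed sketch of what that lookup sees; the dict literal is an illustrative slice of the command JSON, and note that, unlike the default(...)-based lookups above it, direct indexing requires the property to exist in the stack's cluster-env:

# Illustrative slice of the command JSON that params.py indexes into.
config = {
    "configurations": {
        "cluster-env": {"stack_version_nfs_support": "2.3.0.0"},
        "hadoop-env": {"nfsgateway_heapsize": "1024"},
    }
}

stack_version_nfs_support = config['configurations']['cluster-env']['stack_version_nfs_support']
nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
print(stack_version_nfs_support, nfsgateway_heapsize)  # -> 2.3.0.0 1024
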
http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index f0bf4d2..9d4e3f7 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -45,9 +45,13 @@ config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
 stack_name = default("/hostLevelParams/stack_name", None)
+stack_dir = config['configurations']['cluster-env']['stack_dir']
 upgrade_direction = default("/commandParams/upgrade_direction", None)
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 stack_version_formatted = format_stack_version(stack_version_unformatted)
+stack_version_ru_support = config['configurations']['cluster-env']['stack_version_ru_support']
+stack_version_snappy_unsupport = config['configurations']['cluster-env']['stack_version_snappy_unsupport']
+stack_version_ranger_support = config['configurations']['cluster-env']['stack_version_ranger_support']
 agent_stack_retry_on_unavailability = cbool(config["hostLevelParams"]["agent_stack_retry_on_unavailability"])
 agent_stack_retry_count = cint(config["hostLevelParams"]["agent_stack_retry_count"])
 
@@ -86,9 +90,9 @@ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
 hadoop_lib_home = stack_select.get_hadoop_dir("lib")
 
-# hadoop parameters for 2.2+
-if Script.is_stack_greater_or_equal("2.2"):
-  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+# hadoop parameters for stack_version_ru_support+
+if Script.is_stack_greater_or_equal(stack_version_ru_support):
+  mapreduce_libs_path = format("{stack_dir}/current/hadoop-mapreduce-client/*")
 
   if not security_enabled:
     hadoop_secure_dn_user = '""'
@@ -114,7 +118,7 @@ limits_conf_dir = "/etc/security/limits.d"
 hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
 hdfs_user_nproc_limit = default("/configurations/hadoop-env/hdfs_user_nproc_limit", "65536")
 
-create_lib_snappy_symlinks = not Script.is_stack_greater_or_equal("2.2")
+create_lib_snappy_symlinks = not Script.is_stack_greater_or_equal(stack_version_snappy_unsupport)
 jsvc_path = "/usr/lib/bigtop-utils"
 
 execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir

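format() here is the resource_management helper, which interpolates {stack_dir} from the enclosing scope. In plain Python the new mapreduce_libs_path line is equivalent to the following; the stack_dir value is illustrative, chosen to match the /usr/hdp prefix the old hard-coded path used:

# Plain-Python equivalent of: format("{stack_dir}/current/hadoop-mapreduce-client/*")
stack_dir = "/usr/hdp"  # illustrative; read from cluster-env by this patch
mapreduce_libs_path = "{stack_dir}/current/hadoop-mapreduce-client/*".format(
    stack_dir=stack_dir)
print(mapreduce_libs_path)  # -> /usr/hdp/current/hadoop-mapreduce-client/*
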
http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
index 209ac91..e30ff95 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
@@ -60,9 +60,10 @@ def setup_ranger_hdfs(upgrade_type=None):
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
                         stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
 
+
     if stack_version and params.upgrade_direction == Direction.UPGRADE:
-      # when upgrading to 2.3+, this env file must be removed
-      if compare_versions(stack_version, '2.3', format=True) > 0:
+      # when upgrading to stack_version_ranger_support+, this env file must be removed
+      if compare_versions(stack_version, params.stack_version_ranger_support, format=True) > 0:
         source_file = os.path.join(params.hadoop_conf_dir, 'set-hdfs-plugin-env.sh')
         target_file = source_file + ".bak"
         Execute(("mv", source_file, target_file), sudo=True, only_if=format("test -f {source_file}"))

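The format=True flag matters because stack_version here can be a fully dotted string while the old literal was the short '2.3'. A simplified stand-in (not Ambari's actual implementation) showing the normalization idea:

# Simplified stand-in for compare_versions(..., format=True); it pads the
# shorter version with zeros so e.g. "2.2" and "2.2.0.0" compare as equal.
def compare_versions(a, b, format=False):
    pa, pb = a.split("."), b.split(".")
    if format:
        width = max(len(pa), len(pb))
        pa += ["0"] * (width - len(pa))
        pb += ["0"] * (width - len(pb))
    pa, pb = [int(x) for x in pa], [int(x) for x in pb]
    return (pa > pb) - (pa < pb)

# The guard above removes the env file only on upgrades past ranger support:
print(compare_versions("2.3.2.0", "2.2.0.0", format=True) > 0)  # -> True
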
http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
index f96ac01..c19f3ce 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
@@ -64,14 +64,15 @@ class SNameNode(Script):
 class SNameNodeDefault(SNameNode):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-secondarynamenode"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-secondarynamenode"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-hdfs-secondarynamenode", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
index c626028..d30eb50 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
@@ -134,7 +134,7 @@ def kill_zkfc(zkfc_user):
   """
   There are two potential methods for failing over the namenode, especially during a Rolling Upgrade.
   Option 1. Kill zkfc on primary namenode provided that the secondary is up and has zkfc running on it.
-  Option 2. Silent failover (not supported as of HDP 2.2.0.0)
+  Option 2. Silent failover (not supported as of stack_version_ru_support)
   :param zkfc_user: User that started the ZKFC process.
   :return: Return True if ZKFC was killed, otherwise, false.
   """
@@ -224,12 +224,12 @@ def service(action=None, name=None, user=None, options="", create_pid_dir=False,
     hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
 
     # At Champlain stack and further, we may start datanode as a non-root even in secure cluster
-    if not (params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0) or params.secure_dn_ports_are_in_use:
+    if not (params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, params.stack_version_ru_support) >= 0) or params.secure_dn_ports_are_in_use:
       user = "root"
       pid_file = format(
         "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
 
-    if action == 'stop' and (params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0) and \
+    if action == 'stop' and (params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, params.stack_version_ru_support) >= 0) and \
       os.path.isfile(hadoop_secure_dn_pid_file):
         # We need special handling for this case to handle the situation
         # when we configure non-root secure DN and then restart it
@@ -351,11 +351,11 @@ def get_hdfs_binary(distro_component_name):
   """
   import params
   hdfs_binary = "hdfs"
-  if params.stack_name == "HDP":
-    # This was used in HDP 2.1 and earlier
-    hdfs_binary = "hdfs"
-    if Script.is_stack_greater_or_equal("2.2"):
-      hdfs_binary = "/usr/hdp/current/{0}/bin/hdfs".format(distro_component_name)
+  #if params.stack_name == "HDP":
+  #  # This was used in HDP 2.1 and earlier
+  #  hdfs_binary = "hdfs"
+  if Script.is_stack_greater_or_equal(params.stack_version_ru_support):
+    hdfs_binary = "{0}/current/{1}/bin/hdfs".format(params.stack_dir, distro_component_name)
 
   return hdfs_binary
 

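After this hunk, the binary path is assembled from stack_dir instead of a literal /usr/hdp prefix (the commented-out branch preserves the old HDP-only logic for reference). A trimmed standalone version, with the stack check reduced to a boolean argument:

# Trimmed standalone sketch of get_hdfs_binary after this patch; the
# stack_is_ru_capable flag stands in for Script.is_stack_greater_or_equal().
STACK_DIR = "/usr/hdp"  # illustrative value of params.stack_dir

def get_hdfs_binary(distro_component_name, stack_is_ru_capable=True):
    hdfs_binary = "hdfs"  # older layouts rely on the bare binary on PATH
    if stack_is_ru_capable:
        hdfs_binary = "{0}/current/{1}/bin/hdfs".format(STACK_DIR,
                                                        distro_component_name)
    return hdfs_binary

print(get_hdfs_binary("hadoop-hdfs-namenode"))
# -> /usr/hdp/current/hadoop-hdfs-namenode/bin/hdfs
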
http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index 8ac1b5b..70a5fbb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -32,6 +32,21 @@
         <description>Stack version from which rolling upgrade is supported and installation layout changed</description>
     </property>
     <property>
+        <name>stack_version_ranger_support</name>
+        <value>2.2.0.0</value>
+        <description>Stack version from which ranger is supported</description>
+    </property>
+    <property>
+        <name>stack_version_snappy_unsupport</name>
+        <value>2.2.0.0</value>
+        <description>Stack version from which snappy is not supported</description>
+    </property>
+    <property>
+        <name>stack_version_nfs_support</name>
+        <value>2.3.0.0</value>
+        <description>Stack version from which hadoop-hdfs-nfs3 is supported</description>
+    </property>
+    <property>
         <name>security_enabled</name>
         <value>false</value>
         <description>Hadoop Security</description>
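
Net effect of the cluster-env additions: the version gates used throughout the HDFS scripts become data, overridable per stack, rather than literals in code (stack_dir and stack_version_ru_support, consumed by params_linux.py above, are assumed to be defined alongside these elsewhere in the AMBARI-13364 series). A quick self-check that the three new properties parse out as the scripts expect, with the XML trimmed from the hunk above:

import xml.etree.ElementTree as ET

snippet = """
<configuration>
    <property>
        <name>stack_version_ranger_support</name>
        <value>2.2.0.0</value>
    </property>
    <property>
        <name>stack_version_snappy_unsupport</name>
        <value>2.2.0.0</value>
    </property>
    <property>
        <name>stack_version_nfs_support</name>
        <value>2.3.0.0</value>
    </property>
</configuration>
"""

root = ET.fromstring(snippet)
props = {p.findtext("name"): p.findtext("value") for p in root.findall("property")}
print(props["stack_version_nfs_support"])  # -> 2.3.0.0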