Posted to commits@ambari.apache.org by di...@apache.org on 2016/02/29 16:27:01 UTC

ambari git commit: AMBARI-14451: Parameterize distro-specific stack information for HDFS (Juanjo Marron via dili)

Repository: ambari
Updated Branches:
  refs/heads/AMBARI-13364 7f4975fa5 -> 3e02731ab


AMBARI-14451: Parameterize distro-specific stack information for HDFS (Juanjo Marron via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3e02731a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3e02731a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3e02731a

Branch: refs/heads/AMBARI-13364
Commit: 3e02731abe04f4614108a6750371c5bf369d73de
Parents: 7f4975f
Author: Di Li <di...@apache.org>
Authored: Mon Feb 29 10:26:13 2016 -0500
Committer: Di Li <di...@apache.org>
Committed: Mon Feb 29 10:26:13 2016 -0500

----------------------------------------------------------------------
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml     |  3 +--
 .../package/alerts/alert_ha_namenode_health.py      |  6 +++---
 .../package/alerts/alert_metrics_deviation.py       |  6 +++---
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py      |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/hdfs_client.py   |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/journalnode.py   |  5 +++--
 .../package/scripts/journalnode_upgrade.py          |  2 +-
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py      |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/nfsgateway.py    |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/params.py        |  1 +
 .../HDFS/2.1.0.2.0/package/scripts/params_linux.py  | 14 +++++++++-----
 .../2.1.0.2.0/package/scripts/setup_ranger_hdfs.py  | 12 ++++++------
 .../HDFS/2.1.0.2.0/package/scripts/snamenode.py     |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/status_params.py | 12 ++++++------
 .../HDFS/2.1.0.2.0/package/scripts/utils.py         | 16 ++++++++--------
 .../stacks/HDP/2.0.6/configuration/cluster-env.xml  | 15 +++++++++++++++
 16 files changed, 71 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
index 5e43b71..acdde55 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
@@ -229,8 +229,7 @@ export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
 #TODO: if env var set that can cause problems
 export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
 
-{# this is different for HDP1 #}
-# Path to jsvc required by secure HDP 2.0 datanode
+# Path to jsvc required by secure datanode
 export JSVC_HOME={{jsvc_path}}
 
 

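Note: {{jsvc_path}} in the block above is a Jinja-style template variable that Ambari fills in from the HDFS params when rendering hadoop-env.sh. A minimal stand-in rendering (the jsvc_path value is the default set in params_linux.py later in this commit):

    template = "export JSVC_HOME={{jsvc_path}}"
    params = {"jsvc_path": "/usr/lib/bigtop-utils"}  # default from params_linux.py
    print(template.replace("{{jsvc_path}}", params["jsvc_path"]))
    # -> export JSVC_HOME=/usr/lib/bigtop-utils
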
http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
index 7cd5591..419d976 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
@@ -234,7 +234,7 @@ def get_jmx(query, connection_timeout):
 
 def _get_ha_state_from_json(string_json):
   """
-  Searches through the specified JSON string looking for either the HDP 2.0 or 2.1+ HA state
+  Searches through the specified JSON string looking for HA state
   enumerations.
   :param string_json: the string JSON
   :return:  the value of the HA state (active, standby, etc)
@@ -242,7 +242,7 @@ def _get_ha_state_from_json(string_json):
   json_data = json.loads(string_json)
   jmx_beans = json_data["beans"]
 
-  # look for HDP 2.1+ first
+  # look for NameNodeStatus-State first
   for jmx_bean in jmx_beans:
     if "name" not in jmx_bean:
       continue
@@ -251,7 +251,7 @@ def _get_ha_state_from_json(string_json):
     if jmx_bean_name == "Hadoop:service=NameNode,name=NameNodeStatus" and "State" in jmx_bean:
       return jmx_bean["State"]
 
-  # look for HDP 2.0 last
+  # look for FSNamesystem-tag.HAState last
   for jmx_bean in jmx_beans:
     if "name" not in jmx_bean:
       continue

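The reworded comments above describe a two-pass lookup: prefer the State attribute of the NameNodeStatus JMX bean, then fall back to the tag.HAState attribute. A self-contained sketch of that logic (the sample payload is hypothetical):

    import json

    def get_ha_state(string_json):
        # pass 1: Hadoop:service=NameNode,name=NameNodeStatus exposes "State"
        # pass 2: fall back to any bean carrying "tag.HAState"
        jmx_beans = json.loads(string_json)["beans"]
        for bean in jmx_beans:
            if bean.get("name") == "Hadoop:service=NameNode,name=NameNodeStatus" and "State" in bean:
                return bean["State"]
        for bean in jmx_beans:
            if "tag.HAState" in bean:
                return bean["tag.HAState"]
        return None

    sample = '{"beans": [{"name": "Hadoop:service=NameNode,name=NameNodeStatus", "State": "active"}]}'
    print(get_ha_state(sample))  # -> active
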
http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
index 217f3b8..70310a3 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
@@ -330,7 +330,7 @@ def get_jmx(query, connection_timeout):
 
 def _get_ha_state_from_json(string_json):
   """
-  Searches through the specified JSON string looking for either the HDP 2.0 or 2.1+ HA state
+  Searches through the specified JSON string looking for HA state
   enumerations.
   :param string_json: the string JSON
   :return:  the value of the HA state (active, standby, etc)
@@ -338,7 +338,7 @@ def _get_ha_state_from_json(string_json):
   json_data = json.loads(string_json)
   jmx_beans = json_data["beans"]
 
-  # look for HDP 2.1+ first
+  # look for NameNodeStatus-State first
   for jmx_bean in jmx_beans:
     if "name" not in jmx_bean:
       continue
@@ -347,7 +347,7 @@ def _get_ha_state_from_json(string_json):
     if jmx_bean_name == "Hadoop:service=NameNode,name=NameNodeStatus" and "State" in jmx_bean:
       return jmx_bean["State"]
 
-  # look for HDP 2.0 last
+  # look for FSNamesystem-tag.HAState last
   for jmx_bean in jmx_beans:
     if "name" not in jmx_bean:
       continue

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
index 5adeab4..1b55e92 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
@@ -32,7 +32,8 @@ from utils import get_hdfs_binary
 class DataNode(Script):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-datanode"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-datanode"}
 
   def get_hdfs_binary(self):
     """
@@ -87,7 +88,7 @@ class DataNodeDefault(DataNode):
     Logger.info("Executing DataNode Stack Upgrade pre-restart")
     import params
     env.set_params(params)
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_hdp_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       hdp_select.select("hadoop-hdfs-datanode", params.version)
 

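The hard-coded '2.2.0.0' threshold becomes params.stack_version_ru_support, read from cluster-env (see the new properties at the end of this commit). A plain-Python stand-in for the gate, assuming compare_versions does a numeric dotted-version comparison like the one in resource_management:

    def compare_versions(a, b):
        # returns -1, 0 or 1, comparing dotted versions numerically
        ta = tuple(int(x) for x in a.split('.'))
        tb = tuple(int(x) for x in b.split('.'))
        return (ta > tb) - (ta < tb)

    stack_version_ru_support = "2.2.0.0"   # cluster-env default
    version = "2.3.4.0"                    # version being restarted into
    if version and compare_versions(version, stack_version_ru_support) >= 0:
        print("RU-capable stack: run conf_select/hdp_select for this component")
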
http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
index 21c0eda..88e95fb 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
@@ -55,12 +55,13 @@ class HdfsClient(Script):
 class HdfsClientDefault(HdfsClient):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-client"}
+    import params
+    return {params.stack_name: "hadoop-client"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_hdp_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       hdp_select.select("hadoop-client", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
index 7715f6c..f13e43e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
@@ -43,14 +43,15 @@ class JournalNode(Script):
 class JournalNodeDefault(JournalNode):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-journalnode"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-journalnode"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_hdp_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       hdp_select.select("hadoop-hdfs-journalnode", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
index d598840..193e7d7 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
@@ -81,7 +81,7 @@ def hdfs_roll_edits():
   """
   import params
 
-  # TODO, this will be to be doc'ed since existing HDP 2.2 clusters will needs HDFS_CLIENT on all JOURNALNODE hosts
+  # TODO: document that existing clusters at stack_version_ru_support or later will need HDFS_CLIENT on all JOURNALNODE hosts
   dfsadmin_base_command = get_dfsadmin_base_command('hdfs')
   command = dfsadmin_base_command + ' -rollEdits'
   Execute(command, user=params.hdfs_user, tries=1)

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 910bc0a..b0453d4 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -68,7 +68,8 @@ except ImportError:
 class NameNode(Script):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-namenode"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-namenode"}
 
   def get_hdfs_binary(self):
     """
@@ -190,7 +191,7 @@ class NameNodeDefault(NameNode):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_hdp_stack_version(params.version), params.stack_version_ru_support) >= 0:
       # When downgrading an Express Upgrade, the first thing we do is to revert the symlinks.
       # Therefore, we cannot call this code in that scenario.
       call_if = [("rolling", "upgrade"), ("rolling", "downgrade"), ("nonrolling", "upgrade")]

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
index 4b9ad06..8fc0ea2 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
@@ -32,7 +32,8 @@ from resource_management.libraries.functions.version import compare_versions, fo
 class NFSGateway(Script):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-nfs3"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-nfs3"}
 
   def install(self, env):
     import params
@@ -45,7 +46,7 @@ class NFSGateway(Script):
     import params
     env.set_params(params)
 
-    if Script.is_hdp_stack_greater_or_equal('2.3.0.0'):
+    if Script.is_hdp_stack_greater_or_equal(params.stack_version_nfs_support):
       conf_select.select(params.stack_name, "hadoop", params.version)
       hdp_select.select("hadoop-hdfs-nfs3", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
index 7514918..0f3746c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
@@ -25,5 +25,6 @@ else:
   from params_linux import *
 
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+stack_version_nfs_support = config['configurations']['cluster-env']['stack_version_nfs_support']
 nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
 retryAble = default("/commandParams/command_retry_enabled", False)

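stack_version_nfs_support comes straight out of the cluster-env block of the command configurations; a hand-built config dict shows the shape of the lookup (values mirror the cluster-env.xml defaults added below):

    config = {
        'configurations': {
            'cluster-env': {
                'stack_version_ru_support': '2.2.0.0',
                'stack_version_nfs_support': '2.3.0.0',
            }
        }
    }
    stack_version_nfs_support = config['configurations']['cluster-env']['stack_version_nfs_support']
    print(stack_version_nfs_support)  # -> 2.3.0.0
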
http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 29c4784..dd86ef0 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -44,9 +44,13 @@ config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
 stack_name = default("/hostLevelParams/stack_name", None)
+stack_dir = config['configurations']['cluster-env']['stack_dir']
 upgrade_direction = default("/commandParams/upgrade_direction", None)
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_hdp_stack_version(stack_version_unformatted)
+stack_version_ru_support = config['configurations']['cluster-env']['stack_version_ru_support']
+stack_version_snappy_unsupport = config['configurations']['cluster-env']['stack_version_snappy_unsupport']
+stack_version_ranger_support = config['configurations']['cluster-env']['stack_version_ranger_support']
 
 # New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
 version = default("/commandParams/version", None)
@@ -83,9 +87,9 @@ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
 hadoop_lib_home = hdp_select.get_hadoop_dir("lib")
 
-# hadoop parameters for 2.2+
-if Script.is_hdp_stack_greater_or_equal("2.2"):
-  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+# hadoop parameters for stacks at or above stack_version_ru_support
+if Script.is_hdp_stack_greater_or_equal(stack_version_ru_support):
+  mapreduce_libs_path = format("{stack_dir}/current/hadoop-mapreduce-client/*")
 
   if not security_enabled:
     hadoop_secure_dn_user = '""'
@@ -111,7 +115,7 @@ limits_conf_dir = "/etc/security/limits.d"
 hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
 hdfs_user_nproc_limit = default("/configurations/hadoop-env/hdfs_user_nproc_limit", "65536")
 
-create_lib_snappy_symlinks = not Script.is_hdp_stack_greater_or_equal("2.2")
+create_lib_snappy_symlinks = not Script.is_hdp_stack_greater_or_equal(stack_version_snappy_unsupport)
 jsvc_path = "/usr/lib/bigtop-utils"
 
 execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir

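mapreduce_libs_path is the clearest example of the parameterization: the literal /usr/hdp prefix becomes {stack_dir}, so a different distro only has to change one cluster-env value. The in-tree format() resolves names from the caller's scope; with plain str.format the substitution looks like this (/usr/hdp is the assumed HDP default for stack_dir):

    stack_dir = "/usr/hdp"  # cluster-env 'stack_dir'; distro-specific
    mapreduce_libs_path = "{stack_dir}/current/hadoop-mapreduce-client/*".format(stack_dir=stack_dir)
    print(mapreduce_libs_path)  # -> /usr/hdp/current/hadoop-mapreduce-client/*
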
http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
index ff93e39..80a323d 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
@@ -34,10 +34,10 @@ def setup_ranger_hdfs(upgrade_type=None):
     else:
       from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
 
-    hdp_version = None
+    stack_version = None
 
     if upgrade_type is not None:
-      hdp_version = params.version
+      stack_version = params.version
 
     if params.retryAble:
       Logger.info("HDFS: Setup ranger: command retry enables thus retrying if ranger admin is down !")
@@ -58,11 +58,11 @@ def setup_ranger_hdfs(upgrade_type=None):
                         component_list=['hadoop-client'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                         credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        hdp_version_override = hdp_version, skip_if_rangeradmin_down= not params.retryAble)
+                        stack_version_override=stack_version, skip_if_rangeradmin_down=not params.retryAble)
 
-    if hdp_version and params.upgrade_direction == Direction.UPGRADE:
-      # when upgrading to 2.3+, this env file must be removed
-      if compare_versions(hdp_version, '2.3', format=True) > 0:
+    if stack_version and params.upgrade_direction == Direction.UPGRADE:
+      # when upgrading to stack_version_ranger_support or later, this env file must be removed
+      if compare_versions(stack_version, params.stack_version_ranger_support, format=True) > 0:
         source_file = os.path.join(params.hadoop_conf_dir, 'set-hdfs-plugin-env.sh')
         target_file = source_file + ".bak"
         Execute(("mv", source_file, target_file), sudo=True, only_if=format("test -f {source_file}"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
index b8a1726..d012cbf 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
@@ -64,14 +64,15 @@ class SNameNode(Script):
 class SNameNodeDefault(SNameNode):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-secondarynamenode"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-secondarynamenode"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_hdp_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       hdp_select.select("hadoop-hdfs-secondarynamenode", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py
index 388fa59..cdb683b 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py
@@ -36,12 +36,12 @@ if OSCheck.is_windows_family():
 else:
   hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
   hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-  hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-  datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
-  namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
-  snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
-  journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
-  zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+  hadoop_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+  datanode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
+  namenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
+  snamenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
+  journalnode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
+  zkfc_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
   nfsgateway_pid_file = format("{hadoop_pid_dir_prefix}/root/hadoop_privileged_nfs3.pid")
 
   # Security related/required params

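The rename from hdp_pid_dir to hadoop_pid_dir is behavior-preserving (the directory was never HDP-specific); it just keeps the stack name out of variable names. The resulting paths, sketched with stand-in values for the hadoop-env settings:

    hadoop_pid_dir_prefix = "/var/run/hadoop"  # hadoop-env default, assumed
    hdfs_user = "hdfs"
    hadoop_pid_dir = "{0}/{1}".format(hadoop_pid_dir_prefix, hdfs_user)
    datanode_pid_file = "{0}/hadoop-{1}-datanode.pid".format(hadoop_pid_dir, hdfs_user)
    print(datanode_pid_file)  # -> /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid
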
http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
index e59dd78..1a79c0c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
@@ -134,7 +134,7 @@ def kill_zkfc(zkfc_user):
   """
   There are two potential methods for failing over the namenode, especially during a Rolling Upgrade.
   Option 1. Kill zkfc on primary namenode provided that the secondary is up and has zkfc running on it.
-  Option 2. Silent failover (not supported as of HDP 2.2.0.0)
+  Option 2. Silent failover (not supported as of stack_version_ru_support)
   :param zkfc_user: User that started the ZKFC process.
   :return: Return True if ZKFC was killed, otherwise, false.
   """
@@ -224,12 +224,12 @@ def service(action=None, name=None, user=None, options="", create_pid_dir=False,
     hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
 
     # At Champlain stack and further, we may start datanode as a non-root even in secure cluster
-    if not (params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0) or params.secure_dn_ports_are_in_use:
+    if not (params.stack_version_unformatted != "" and compare_versions(params.stack_version_unformatted, params.stack_version_ru_support) >= 0) or params.secure_dn_ports_are_in_use:
       user = "root"
       pid_file = format(
         "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
 
-    if action == 'stop' and (params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0) and \
+    if action == 'stop' and (params.stack_version_unformatted != "" and compare_versions(params.stack_version_unformatted, params.stack_version_ru_support) >= 0) and \
       os.path.isfile(hadoop_secure_dn_pid_file):
         # We need special handling for this case to handle the situation
         # when we configure non-root secure DN and then restart it
@@ -351,11 +351,11 @@ def get_hdfs_binary(distro_component_name):
   """
   import params
   hdfs_binary = "hdfs"
-  if params.stack_name == "HDP":
-    # This was used in HDP 2.1 and earlier
-    hdfs_binary = "hdfs"
-    if Script.is_hdp_stack_greater_or_equal("2.2"):
-      hdfs_binary = "/usr/hdp/current/{0}/bin/hdfs".format(distro_component_name)
+  # On stacks older than stack_version_ru_support (HDP 2.1 and earlier),
+  # the bare "hdfs" default above is used as-is; newer stacks resolve the
+  # versioned binary under {stack_dir}/current instead.
+  if Script.is_hdp_stack_greater_or_equal(params.stack_version_ru_support):
+    hdfs_binary = "{0}/current/{1}/bin/hdfs".format(params.stack_dir, distro_component_name)
 
   return hdfs_binary
 

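A condensed, self-contained version of the new get_hdfs_binary logic, with the stack check reduced to a boolean and stand-in defaults:

    def get_hdfs_binary(distro_component_name, stack_dir="/usr/hdp",
                        stack_is_ru_capable=True):
        # bare "hdfs" for pre-RU stacks; versioned path under
        # {stack_dir}/current for RU-capable ones
        hdfs_binary = "hdfs"
        if stack_is_ru_capable:
            hdfs_binary = "{0}/current/{1}/bin/hdfs".format(stack_dir, distro_component_name)
        return hdfs_binary

    print(get_hdfs_binary("hadoop-hdfs-namenode"))
    # -> /usr/hdp/current/hadoop-hdfs-namenode/bin/hdfs
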
http://git-wip-us.apache.org/repos/asf/ambari/blob/3e02731a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index ea8dce3..eedaa69 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -32,6 +32,21 @@
         <description>Stack version from which rolling upgrade is supported and installation layout changed</description>
     </property>
     <property>
+        <name>stack_version_ranger_support</name>
+        <value>2.2.0.0</value>
+        <description>Stack version from which Ranger is supported</description>
+    </property>
+    <property>
+        <name>stack_version_snappy_unsupport</name>
+        <value>2.2.0.0</value>
+        <description>Stack version from which Snappy is no longer supported</description>
+    </property>
+    <property>
+        <name>stack_version_nfs_support</name>
+        <value>2.3.0.0</value>
+        <description>Stack version from which hadoop-hdfs-nfs3 is supported</description>
+    </property>
+    <property>
         <name>security_enabled</name>
         <value>false</value>
         <description>Hadoop Security</description>
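
Taken together, the three new properties move every distro-specific version threshold out of the Python scripts and into stack configuration. Ambari materializes cluster-env into the command JSON that the scripts read; as a standalone illustration, parsing the XML above directly yields the same key/value pairs:

    import xml.etree.ElementTree as ET

    cluster_env = '''
    <configuration>
      <property><name>stack_version_ranger_support</name><value>2.2.0.0</value></property>
      <property><name>stack_version_snappy_unsupport</name><value>2.2.0.0</value></property>
      <property><name>stack_version_nfs_support</name><value>2.3.0.0</value></property>
    </configuration>
    '''
    props = {p.findtext('name'): p.findtext('value')
             for p in ET.fromstring(cluster_env).findall('property')}
    print(props['stack_version_nfs_support'])  # -> 2.3.0.0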