Posted to commits@ambari.apache.org by mu...@apache.org on 2017/02/24 04:53:06 UTC

ambari git commit: AMBARI-20047 Repository config user needs to be set to the customized service user in Ranger when the service plugin is enabled (Vishal Suvagia via mugdha)

Repository: ambari
Updated Branches:
  refs/heads/trunk 7d1e04b39 -> 14a99627b


AMBARI-20047 Repository config user needs to be set to the customized service user in Ranger when the service plugin is enabled (Vishal Suvagia via mugdha)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/14a99627
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/14a99627
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/14a99627

Branch: refs/heads/trunk
Commit: 14a99627bae2eeb108cfd1b45b953d3c667e8006
Parents: 7d1e04b
Author: Vishal Suvagia <vi...@yahoo.com>
Authored: Wed Feb 22 20:14:47 2017 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Fri Feb 24 10:22:24 2017 +0530

----------------------------------------------------------------------
 .../functions/setup_ranger_plugin_xml.py        |   1 +
 .../ranger-hdfs-plugin-properties.xml           |  10 +
 .../HDFS/3.0.0.3.0/service_advisor.py           |  24 +
 .../ranger-yarn-plugin-properties.xml           |  10 +
 .../YARN/3.0.0.3.0/service_advisor.py           |  19 +
 .../ranger-atlas-plugin-properties.xml          |  20 +
 .../ranger-hbase-plugin-properties.xml          |  17 +
 .../ranger-hdfs-plugin-properties.xml           |  18 +
 .../ranger-hive-plugin-properties.xml           |  18 +
 .../ranger-kafka-plugin-properties.xml          |  17 +
 .../ranger-yarn-plugin-properties.xml           |  17 +
 .../stacks/HDP/2.6/services/stack_advisor.py    | 119 +++-
 .../HDP/3.0/properties/stack_features.json      |  45 ++
 .../stacks/2.6/common/test_stack_advisor.py     | 539 ++++++++++++++++++-
 14 files changed, 867 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/14a99627/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
index 56c46dd..04a5bb1 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
@@ -88,6 +88,7 @@ def setup_ranger_plugin(component_select_name, service_name, previous_jdbc_jar,
             if 'serviceName' in json_data and json_data['serviceName'] == repo_name:
               service_name_exist = True
               Logger.info("Skipping Ranger API calls, as policy cache file exists for {0}".format(service_name))
+              Logger.warning("If service name for {0} is not created on Ranger Admin UI, then to re-create it delete policy cache file: {1}".format(service_name, policycache_json_file))
               break
     except Exception, err:
       Logger.error("Error occurred while fetching service name from policy cache file.\nError: {0}".format(err))
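
For context, the warning added above sits inside the short circuit that skips Ranger Admin REST calls when the repository is already recorded in the local policy cache JSON. A minimal standalone sketch of that check, with a hypothetical path and repository name (the real function derives both from its arguments):

import json
import os

def repo_exists_in_policy_cache(policycache_json_file, repo_name):
    # True when the cached policy file already records the repository; this is
    # the condition under which the new warning above is logged.
    if not os.path.isfile(policycache_json_file):
        return False
    with open(policycache_json_file) as f:
        json_data = json.load(f)
    return json_data.get('serviceName') == repo_name

# Hypothetical usage:
# repo_exists_in_policy_cache('/etc/ranger/c1_hadoop/policycache/hdfs_c1_hadoop.json', 'c1_hadoop')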

http://git-wip-us.apache.org/repos/asf/ambari/blob/14a99627/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-plugin-properties.xml
index eb7e0bb..539a109 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-plugin-properties.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-plugin-properties.xml
@@ -69,6 +69,16 @@
     <display-name>Ranger repository config user</display-name>
     <description>Used for repository creation on ranger admin
     </description>
+    <depends-on>
+      <property>
+        <type>ranger-hdfs-plugin-properties</type>
+        <name>ranger-hdfs-plugin-enabled</name>
+      </property>
+      <property>
+        <type>hadoop-env</type>
+        <name>hdfs_user</name>
+      </property>
+    </depends-on>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/14a99627/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/service_advisor.py
index 2291c94..a37ebc67 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/service_advisor.py
@@ -138,6 +138,7 @@ class HDFSServiceAdvisor(service_advisor.ServiceAdvisor):
     recommender = HDFSRecommender()
     recommender.recommendConfigurationsFromHDP206(configurations, clusterData, services, hosts)
     recommender.recommendConfigurationsFromHDP23(configurations, clusterData, services, hosts)
+    recommender.recommendConfigurationsFromHDP26(configurations, clusterData, services, hosts)
 
   def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
     """
@@ -272,6 +273,29 @@ class HDFSRecommender(service_advisor.ServiceAdvisor):
     else:
       putHdfsSitePropertyAttribute('dfs.namenode.inode.attributes.provider.class', 'delete', 'true')
 
+  def recommendConfigurationsFromHDP26(self, configurations, clusterData, services, hosts):
+    """
+    Recommend configurations for this service based on HDP 2.6
+    """
+    if 'hadoop-env' in services['configurations'] and 'hdfs_user' in  services['configurations']['hadoop-env']['properties']:
+      hdfs_user = services['configurations']['hadoop-env']['properties']['hdfs_user']
+    else:
+      hdfs_user = 'hadoop'
+
+    if 'ranger-hdfs-plugin-properties' in configurations and 'ranger-hdfs-plugin-enabled' in configurations['ranger-hdfs-plugin-properties']['properties']:
+      ranger_hdfs_plugin_enabled = (configurations['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled'].lower() == 'Yes'.lower())
+    elif 'ranger-hdfs-plugin-properties' in services['configurations'] and 'ranger-hdfs-plugin-enabled' in services['configurations']['ranger-hdfs-plugin-properties']['properties']:
+      ranger_hdfs_plugin_enabled = (services['configurations']['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled'].lower() == 'Yes'.lower())
+    else:
+      ranger_hdfs_plugin_enabled = False
+
+    if ranger_hdfs_plugin_enabled and 'ranger-hdfs-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-hdfs-plugin-properties']['properties']:
+      Logger.info("Setting HDFS Repo user for Ranger.")
+      putRangerHDFSPluginProperty = self.putProperty(configurations, "ranger-hdfs-plugin-properties", services)
+      putRangerHDFSPluginProperty("REPOSITORY_CONFIG_USERNAME",hdfs_user)
+    else:
+      Logger.info("Not setting HDFS Repo user for Ranger.")
+
 
 class HDFSValidator(service_advisor.ServiceAdvisor):
   """

http://git-wip-us.apache.org/repos/asf/ambari/blob/14a99627/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-plugin-properties.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-plugin-properties.xml
index 97867cc..db65f17 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-plugin-properties.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-plugin-properties.xml
@@ -66,6 +66,16 @@
     <value>yarn</value>
     <display-name>Ranger repository config user</display-name>
     <description>Used for repository creation on ranger admin</description>
+    <depends-on>
+      <property>
+        <type>ranger-yarn-plugin-properties</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_user</name>
+      </property>
+    </depends-on>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/14a99627/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
index 6e4e55f..e1e03c1 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
@@ -449,6 +449,25 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
       else:
         putYarnSiteProperty('yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled', "false")
 
+    if 'yarn-env' in services['configurations'] and 'yarn_user' in services['configurations']['yarn-env']['properties']:
+      yarn_user = services['configurations']['yarn-env']['properties']['yarn_user']
+    else:
+      yarn_user = 'yarn'
+    if 'ranger-yarn-plugin-properties' in configurations and 'ranger-yarn-plugin-enabled' in configurations['ranger-yarn-plugin-properties']['properties']:
+      ranger_yarn_plugin_enabled = (configurations['ranger-yarn-plugin-properties']['properties']['ranger-yarn-plugin-enabled'].lower() == 'Yes'.lower())
+    elif 'ranger-yarn-plugin-properties' in services['configurations'] and 'ranger-yarn-plugin-enabled' in services['configurations']['ranger-yarn-plugin-properties']['properties']:
+      ranger_yarn_plugin_enabled = (services['configurations']['ranger-yarn-plugin-properties']['properties']['ranger-yarn-plugin-enabled'].lower() == 'Yes'.lower())
+    else:
+      ranger_yarn_plugin_enabled = False
+
+    if ranger_yarn_plugin_enabled and 'ranger-yarn-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-yarn-plugin-properties']['properties']:
+      Logger.info("Setting Yarn Repo user for Ranger.")
+      putRangerYarnPluginProperty = self.putProperty(configurations, "ranger-yarn-plugin-properties", services)
+      putRangerYarnPluginProperty("REPOSITORY_CONFIG_USERNAME",yarn_user)
+    else:
+      Logger.info("Not setting Yarn Repo user for Ranger.")
+
+
   #region LLAP
   def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name):
     """

http://git-wip-us.apache.org/repos/asf/ambari/blob/14a99627/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/ranger-atlas-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/ranger-atlas-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/ranger-atlas-plugin-properties.xml
index d8885e5..c53f203 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/ranger-atlas-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/ranger-atlas-plugin-properties.xml
@@ -67,5 +67,25 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>admin</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>admin</value>
+    <display-name>Ranger repository config password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Used for repository creation on ranger admin
+    </description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
 
 </configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/14a99627/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/configuration/ranger-hbase-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
index d8885e5..d19aa1c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
@@ -67,5 +67,22 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>hbase</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin</description>
+    <depends-on>
+      <property>
+        <type>ranger-hbase-plugin-properties</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+      <property>
+        <type>hbase-env</type>
+        <name>hbase_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
 
 </configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/14a99627/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
index 6e93879..d8639fc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
@@ -67,4 +67,22 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>hadoop</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin
+    </description>
+    <depends-on>
+      <property>
+        <type>ranger-hdfs-plugin-properties</type>
+        <name>ranger-hdfs-plugin-enabled</name>
+      </property>
+      <property>
+        <type>hadoop-env</type>
+        <name>hdfs_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/14a99627/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/ranger-hive-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/ranger-hive-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/ranger-hive-plugin-properties.xml
index d8885e5..cc250bb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/ranger-hive-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/ranger-hive-plugin-properties.xml
@@ -67,5 +67,23 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>hive</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin
+    </description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_security_authorization</name>
+      </property>
+      <property>
+        <type>hive-env</type>
+        <name>hive_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
 
 </configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/14a99627/ambari-server/src/main/resources/stacks/HDP/2.6/services/KAFKA/configuration/ranger-kafka-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/KAFKA/configuration/ranger-kafka-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/KAFKA/configuration/ranger-kafka-plugin-properties.xml
index d8885e5..8b72e7d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/KAFKA/configuration/ranger-kafka-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/KAFKA/configuration/ranger-kafka-plugin-properties.xml
@@ -67,5 +67,22 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>kafka</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin</description>
+    <depends-on>
+      <property>
+        <type>ranger-kafka-plugin-properties</type>
+        <name>ranger-kafka-plugin-enabled</name>
+      </property>
+      <property>
+        <type>kafka-env</type>
+        <name>kafka_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
 
 </configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/14a99627/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/ranger-yarn-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/ranger-yarn-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/ranger-yarn-plugin-properties.xml
index d8885e5..c2e5c75 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/ranger-yarn-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/ranger-yarn-plugin-properties.xml
@@ -67,5 +67,22 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>yarn</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin</description>
+    <depends-on>
+      <property>
+        <type>ranger-yarn-plugin-properties</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
 
 </configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/14a99627/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index 3dd1bdd..0968d8b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -30,11 +30,16 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
   def getServiceConfigurationRecommenderDict(self):
       parentRecommendConfDict = super(HDP26StackAdvisor, self).getServiceConfigurationRecommenderDict()
       childRecommendConfDict = {
-          "DRUID": self.recommendDruidConfigurations,
-          "ATLAS": self.recommendAtlasConfigurations,
-          "TEZ": self.recommendTezConfigurations,
-          "RANGER": self.recommendRangerConfigurations,
-          "RANGER_KMS": self.recommendRangerKMSConfigurations
+        "DRUID": self.recommendDruidConfigurations,
+        "ATLAS": self.recommendAtlasConfigurations,
+        "TEZ": self.recommendTezConfigurations,
+        "RANGER": self.recommendRangerConfigurations,
+        "RANGER_KMS": self.recommendRangerKMSConfigurations,
+        "HDFS": self.recommendHDFSConfigurations,
+        "HIVE": self.recommendHIVEConfigurations,
+        "HBASE": self.recommendHBASEConfigurations,
+        "YARN": self.recommendYARNConfigurations,
+        "KAFKA": self.recommendKAFKAConfigurations
       }
       parentRecommendConfDict.update(childRecommendConfDict)
       return parentRecommendConfDict
@@ -151,6 +156,24 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
       else:
         putYarnSiteProperty('yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled', "false")
 
+    if 'yarn-env' in services['configurations'] and 'yarn_user' in services['configurations']['yarn-env']['properties']:
+      yarn_user = services['configurations']['yarn-env']['properties']['yarn_user']
+    else:
+      yarn_user = 'yarn'
+    if 'ranger-yarn-plugin-properties' in configurations and 'ranger-yarn-plugin-enabled' in configurations['ranger-yarn-plugin-properties']['properties']:
+      ranger_yarn_plugin_enabled = (configurations['ranger-yarn-plugin-properties']['properties']['ranger-yarn-plugin-enabled'].lower() == 'Yes'.lower())
+    elif 'ranger-yarn-plugin-properties' in services['configurations'] and 'ranger-yarn-plugin-enabled' in services['configurations']['ranger-yarn-plugin-properties']['properties']:
+      ranger_yarn_plugin_enabled = (services['configurations']['ranger-yarn-plugin-properties']['properties']['ranger-yarn-plugin-enabled'].lower() == 'Yes'.lower())
+    else:
+      ranger_yarn_plugin_enabled = False
+
+    if ranger_yarn_plugin_enabled and 'ranger-yarn-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-yarn-plugin-properties']['properties']:
+      Logger.info("Setting Yarn Repo user for Ranger.")
+      putRangerYarnPluginProperty = self.putProperty(configurations, "ranger-yarn-plugin-properties", services)
+      putRangerYarnPluginProperty("REPOSITORY_CONFIG_USERNAME",yarn_user)
+    else:
+      Logger.info("Not setting Yarn Repo user for Ranger.")
+
   def getMetadataConnectionString(self, database_type):
       driverDict = {
           'mysql': 'jdbc:mysql://{0}:{2}/{1}?createDatabaseIfNotExist=true',
@@ -320,4 +343,88 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
     if ranger_kms_ssl_enabled:
       putRangerKmsEnvProperty("kms_port", ranger_kms_ssl_port)
     else:
-      putRangerKmsEnvProperty("kms_port", "9292")
\ No newline at end of file
+      putRangerKmsEnvProperty("kms_port", "9292")
+
+  def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP26StackAdvisor, self).recommendHDFSConfigurations(configurations, clusterData, services, hosts)
+    if 'hadoop-env' in services['configurations'] and 'hdfs_user' in  services['configurations']['hadoop-env']['properties']:
+      hdfs_user = services['configurations']['hadoop-env']['properties']['hdfs_user']
+    else:
+      hdfs_user = 'hadoop'
+
+    if 'ranger-hdfs-plugin-properties' in configurations and 'ranger-hdfs-plugin-enabled' in configurations['ranger-hdfs-plugin-properties']['properties']:
+      ranger_hdfs_plugin_enabled = (configurations['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled'].lower() == 'Yes'.lower())
+    elif 'ranger-hdfs-plugin-properties' in services['configurations'] and 'ranger-hdfs-plugin-enabled' in services['configurations']['ranger-hdfs-plugin-properties']['properties']:
+      ranger_hdfs_plugin_enabled = (services['configurations']['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled'].lower() == 'Yes'.lower())
+    else:
+      ranger_hdfs_plugin_enabled = False
+
+    if ranger_hdfs_plugin_enabled and 'ranger-hdfs-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-hdfs-plugin-properties']['properties']:
+      Logger.info("Setting HDFS Repo user for Ranger.")
+      putRangerHDFSPluginProperty = self.putProperty(configurations, "ranger-hdfs-plugin-properties", services)
+      putRangerHDFSPluginProperty("REPOSITORY_CONFIG_USERNAME",hdfs_user)
+    else:
+      Logger.info("Not setting HDFS Repo user for Ranger.")
+
+  def recommendHIVEConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP26StackAdvisor, self).recommendHIVEConfigurations(configurations, clusterData, services, hosts)
+    if 'hive-env' in services['configurations'] and 'hive_user' in services['configurations']['hive-env']['properties']:
+      hive_user = services['configurations']['hive-env']['properties']['hive_user']
+    else:
+      hive_user = 'hive'
+
+    if 'hive-env' in configurations and 'hive_security_authorization' in configurations['hive-env']['properties']:
+      ranger_hive_plugin_enabled = (configurations['hive-env']['properties']['hive_security_authorization'].lower() == 'ranger')
+    elif 'hive-env' in services['configurations'] and 'hive_security_authorization' in services['configurations']['hive-env']['properties']:
+      ranger_hive_plugin_enabled = (services['configurations']['hive-env']['properties']['hive_security_authorization'].lower() == 'ranger')
+    else :
+      ranger_hive_plugin_enabled = False
+
+    if ranger_hive_plugin_enabled and 'ranger-hive-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-hive-plugin-properties']['properties']:
+      Logger.info("Setting Hive Repo user for Ranger.")
+      putRangerHivePluginProperty = self.putProperty(configurations, "ranger-hive-plugin-properties", services)
+      putRangerHivePluginProperty("REPOSITORY_CONFIG_USERNAME",hive_user)
+    else:
+      Logger.info("Not setting Hive Repo user for Ranger.")
+
+  def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP26StackAdvisor, self).recommendHBASEConfigurations(configurations, clusterData, services, hosts)
+    if 'hbase-env' in services['configurations'] and 'hbase_user' in services['configurations']['hbase-env']['properties']:
+      hbase_user = services['configurations']['hbase-env']['properties']['hbase_user']
+    else:
+      hbase_user = 'hbase'
+
+    if 'ranger-hbase-plugin-properties' in configurations and 'ranger-hbase-plugin-enabled' in configurations['ranger-hbase-plugin-properties']['properties']:
+      ranger_hbase_plugin_enabled = (configurations['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled'].lower() == 'Yes'.lower())
+    elif 'ranger-hbase-plugin-properties' in services['configurations'] and 'ranger-hbase-plugin-enabled' in services['configurations']['ranger-hbase-plugin-properties']['properties']:
+      ranger_hbase_plugin_enabled = (services['configurations']['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled'].lower() == 'Yes'.lower())
+    else:
+      ranger_hbase_plugin_enabled = False
+
+    if ranger_hbase_plugin_enabled and 'ranger-hbase-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-hbase-plugin-properties']['properties']:
+      Logger.info("Setting Hbase Repo user for Ranger.")
+      putRangerHbasePluginProperty = self.putProperty(configurations, "ranger-hbase-plugin-properties", services)
+      putRangerHbasePluginProperty("REPOSITORY_CONFIG_USERNAME",hbase_user)
+    else:
+      Logger.info("Not setting Hbase Repo user for Ranger.")
+
+  def recommendKAFKAConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP26StackAdvisor, self).recommendKAFKAConfigurations(configurations, clusterData, services, hosts)
+    if 'kafka-env' in services['configurations'] and 'kafka_user' in services['configurations']['kafka-env']['properties']:
+      kafka_user = services['configurations']['kafka-env']['properties']['kafka_user']
+    else:
+      kafka_user = "kafka"
+
+    if 'ranger-kafka-plugin-properties' in configurations and  'ranger-kafka-plugin-enabled' in configurations['ranger-kafka-plugin-properties']['properties']:
+      ranger_kafka_plugin_enabled = (configurations['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'].lower() == 'Yes'.lower())
+    elif 'ranger-kafka-plugin-properties' in services['configurations'] and 'ranger-kafka-plugin-enabled' in services['configurations']['ranger-kafka-plugin-properties']['properties']:
+      ranger_kafka_plugin_enabled = (services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'].lower() == 'Yes'.lower())
+    else:
+      ranger_kafka_plugin_enabled = False
+
+    if ranger_kafka_plugin_enabled and 'ranger-kafka-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-kafka-plugin-properties']['properties']:
+      Logger.info("Setting Kafka Repo user for Ranger.")
+      putRangerKafkaPluginProperty = self.putProperty(configurations, "ranger-kafka-plugin-properties", services)
+      putRangerKafkaPluginProperty("REPOSITORY_CONFIG_USERNAME",kafka_user)
+    else:
+      Logger.info("Not setting Kafka Repo user for Ranger.")

http://git-wip-us.apache.org/repos/asf/ambari/blob/14a99627/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
index d64598a..932dfc9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
@@ -322,6 +322,51 @@
       "name": "phoenix_core_hdfs_site_required",
       "description": "HDFS and CORE site required for Phoenix",
       "max_version": "2.5.9.9"
+    },
+    {
+      "name": "ranger_tagsync_ssl_xml_support",
+      "description": "Ranger Tagsync ssl xml support.",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "ranger_xml_configuration",
+      "description": "Ranger code base support xml configurations",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "kafka_ranger_plugin_support",
+      "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "yarn_ranger_plugin_support",
+      "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "ranger_solr_config_support",
+      "description": "Showing Ranger solrconfig.xml on UI",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "core_site_for_ranger_plugins",
+      "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "secure_ranger_ssl_password",
+      "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "ranger_kms_ssl",
+      "description": "Ranger KMS SSL properties in ambari stack",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "atlas_hdfs_site_on_namenode_ha",
+      "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
+      "min_version": "2.6.0.0"
     }
   ]
 }
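
Each entry added to stack_features.json gates behaviour on a stack version via min_version/max_version. A self-contained sketch of how such an entry can be evaluated against a version string; it assumes the file's top-level key is "stack_features" and parses the JSON directly rather than going through Ambari's own stack-feature helpers:

import json

def _ver(v):
    return tuple(int(part) for part in v.split('.'))

def feature_supported(stack_features_text, feature_name, stack_version):
    # True when stack_version lies inside the feature's [min_version, max_version] range.
    for feature in json.loads(stack_features_text).get("stack_features", []):
        if feature["name"] == feature_name:
            lo, hi = feature.get("min_version"), feature.get("max_version")
            if lo and _ver(stack_version) < _ver(lo):
                return False
            if hi and _ver(stack_version) > _ver(hi):
                return False
            return True
    return False

# feature_supported(open('stack_features.json').read(), 'ranger_kms_ssl', '2.6.0.0')  # -> True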

http://git-wip-us.apache.org/repos/asf/ambari/blob/14a99627/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index c15eaf1..5bfa1a9 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -20,7 +20,7 @@ import json
 import os
 from unittest import TestCase
 from mock.mock import patch
-
+import socket
 
 class TestHDP26StackAdvisor(TestCase):
   def setUp(self):
@@ -849,6 +849,543 @@ class TestHDP26StackAdvisor(TestCase):
     self.stackAdvisor.recommendRangerKMSConfigurations(recommendedConfigurations, clusterData, services, None)
     self.assertEquals(recommendedConfigurations, expected)
 
+  def test_recommendHDFSConfigurations(self):
+    ambariHostName = socket.getfqdn()
+    configurations = {
+      "ranger-hdfs-plugin-properties": {
+        "properties": {
+          "ranger-hdfs-plugin-enabled": "Yes",
+          "REPOSITORY_CONFIG_USERNAME":"hadoop"
+        }
+      },
+      "hadoop-env":{
+        "properties":{
+          "hdfs_user":"custom_hdfs"
+        }
+      }
+    }
+    clusterData = {
+      "totalAvailableRam": 2048,
+      "hBaseInstalled": True,
+      "hbaseRam": 112,
+      "reservedRam": 128
+    }
+
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "disk_info": [{
+              "size": '8',
+              "mountpoint": "/"
+            }]
+          }
+        }]}
+
+    services = {
+      "services":
+        [
+          {
+            "StackServices": {
+              "service_name" : "HDFS",
+              "service_version" : "2.7.0.2.6"
+            },
+            "components": [
+            ]
+          }
+        ],
+      "Versions": {
+        "stack_version": "2.6"
+      },
+      "configurations": configurations,
+      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
+    }
+
+
+    expected = {
+      'core-site': {
+        'properties': {
+          'hadoop.proxyuser.ambari_user.groups': '*',
+          'hadoop.proxyuser.custom_hdfs.groups': '*',
+          'hadoop.proxyuser.custom_hdfs.hosts': '*',
+          'hadoop.proxyuser.ambari_user.hosts': ambariHostName
+        },
+        'property_attributes': {
+          'hadoop.security.key.provider.path': {
+            'delete': 'true'
+          }
+        }
+      },
+      'hadoop-env': {
+        'properties': {
+          'hdfs_user': 'custom_hdfs',
+          'namenode_heapsize': '1024',
+          'namenode_opt_maxnewsize': '128',
+          'namenode_opt_newsize': '128'
+        }
+      },
+      'hdfs-site': {
+        'properties': {
+          'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+          'dfs.datanode.failed.volumes.tolerated': '0',
+          'dfs.datanode.max.transfer.threads': '16384',
+          'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+          'dfs.namenode.handler.count': '100',
+          'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
+          'dfs.namenode.safemode.threshold-pct': '1.000',
+          'dfs.namenode.inode.attributes.provider.class': 'org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer',
+          'dfs.datanode.du.reserved': '1073741824'
+        },
+        'property_attributes': {
+          'dfs.datanode.failed.volumes.tolerated': {
+            'maximum': '1'
+          },
+          'dfs.encryption.key.provider.uri': {
+            'delete': 'true'
+          }
+        }
+      },
+      'ranger-hdfs-plugin-properties': {
+        'properties': {
+          'ranger-hdfs-plugin-enabled': 'Yes',
+          'REPOSITORY_CONFIG_USERNAME': 'custom_hdfs'
+        }
+      }
+    }
+
+    self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations,expected)
+    configurations['hadoop-env']['properties']['hdfs_user'] = 'hadoop'
+    expected['hadoop-env']['properties']['hdfs_user'] = 'hadoop'
+    expected['ranger-hdfs-plugin-properties']['properties']['REPOSITORY_CONFIG_USERNAME'] = 'hadoop'
+    expected['core-site']['properties']['hadoop.proxyuser.hadoop.hosts'] = '*'
+    expected['core-site']['properties']['hadoop.proxyuser.hadoop.groups'] = '*'
+    self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations,expected)
+
+
+  def test_recommendHiveConfigurations(self):
+    configurations = {
+      "ranger-hive-plugin-properties": {
+        "properties": {
+          "ranger-hive-plugin-enabled": "Yes",
+          "REPOSITORY_CONFIG_USERNAME":"hive"
+        }
+      },
+      "hive-env":{
+        "properties":{
+          "hive_security_authorization":"ranger",
+          "hive_user":"custom_hive"
+        }
+      }
+    }
+    clusterData = {
+      "cpu": 4,
+      "mapMemory": 3000,
+      "amMemory": 2000,
+      "reduceMemory": 2056,
+      "containers": 3,
+      "ramPerContainer": 256,
+      "yarnMinContainerSize": 256
+    }
+
+    hosts = {}
+
+    services = {
+      "services":
+        [
+         {
+            "StackServices": {
+              "service_name" : "HIVE",
+              "service_version" : "1.2.1.2.6"
+            },
+            "components": [
+            ]
+          }
+        ],
+      "Versions": {
+        "stack_name" : "HDP",
+        "stack_version": "2.6"
+      },
+      "configurations": configurations,
+      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
+    }
+
+
+    expected = {
+      'yarn-env': {
+        'properties': {
+          'min_user_id': '500',
+          'service_check.queue.name': 'default'
+        }
+      },
+      'ranger-hive-plugin-properties': {
+        'properties': {
+          'ranger-hive-plugin-enabled': 'Yes',
+          'REPOSITORY_CONFIG_USERNAME': 'custom_hive'
+        }
+      },
+      'webhcat-site': {
+        'properties': {
+          'templeton.hadoop.queue.name': 'default'
+        }
+      },
+      'hive-interactive-env': {
+        'properties': {
+          'enable_hive_interactive': 'false'
+        },
+        'property_attributes': {
+          'num_llap_nodes': {
+            'read_only': 'true'
+          }
+        }
+      },
+      'hive-env': {
+        'properties': {
+          'hive.atlas.hook': 'false',
+          'hive_security_authorization': 'ranger',
+          'hive_exec_orc_storage_strategy': 'SPEED',
+          'hive_timeline_logging_enabled': 'true',
+          'hive_txn_acid': 'off',
+          'hive_user': 'custom_hive'
+        }
+      },
+      'hiveserver2-site': {
+        'properties': {
+          'hive.security.authorization.enabled': 'true',
+          'hive.conf.restricted.list': 'hive.security.authenticator.manager,hive.security.authorization.manager,hive.security.metastore.authorization.manager,hive.security.metastore.authenticator.manager,hive.users.in.admin.role,hive.server2.xsrf.filter.enabled,hive.security.authorization.enabled',
+          'hive.security.authenticator.manager': 'org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator',
+          'hive.security.authorization.manager': 'org.apache.ranger.authorization.hive.authorizer.RangerHiveAuthorizerFactory'
+        }
+      },
+      'hive-site': {
+        'properties': {
+          'hive.tez.container.size': '768',
+          'hive.exec.orc.default.stripe.size': '67108864',
+          'hive.execution.engine': 'mr',
+          'hive.vectorized.execution.reduce.enabled': 'false',
+          'hive.compactor.worker.threads': '0',
+          'hive.compactor.initiator.on': 'false',
+          'hive.exec.pre.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
+          'hive.compute.query.using.stats': 'true',
+          'hive.exec.orc.default.compress': 'ZLIB',
+          'hive.exec.orc.encoding.strategy': 'SPEED',
+          'hive.server2.tez.initialize.default.sessions': 'false',
+          'hive.security.authorization.enabled': 'true',
+          'hive.exec.post.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
+          'hive.server2.tez.default.queues': 'default',
+          'hive.prewarm.enabled': 'false',
+          'hive.exec.orc.compression.strategy': 'SPEED',
+          'hive.optimize.index.filter': 'true',
+          'hive.auto.convert.join.noconditionaltask.size': '214748364',
+          'hive.vectorized.execution.enabled': 'true',
+          'hive.exec.reducers.bytes.per.reducer': '67108864',
+          'hive.txn.manager': 'org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager',
+          'hive.server2.tez.sessions.per.default.queue': '1',
+          'hive.prewarm.numcontainers': '3',
+          'hive.tez.dynamic.partition.pruning': 'true',
+          'hive.tez.auto.reducer.parallelism': 'true',
+          'hive.server2.use.SSL': 'false',
+          'hive.exec.failure.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
+          'hive.support.concurrency': 'false',
+          'hive.tez.java.opts': '-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps',
+          'hive.security.metastore.authorization.manager': 'org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider',
+          'hive.exec.dynamic.partition.mode': 'strict',
+          'hive.optimize.sort.dynamic.partition': 'false',
+          'hive.server2.enable.doAs': 'false'
+        },
+        'property_attributes': {
+          'hive.tez.container.size': {
+            'minimum': '256',
+            'maximum': '768'
+          },
+          'atlas.cluster.name': {
+            'delete': 'true'
+          },
+          'hive.server2.tez.default.queues': {
+            'entries': [
+              {
+                'value': 'default',
+                'label': 'default queue'
+              }
+            ]
+          },
+          'datanucleus.rdbms.datastoreAdapterClassName': {
+            'delete': 'true'
+          },
+          'hive.auto.convert.join.noconditionaltask.size': {
+            'maximum': '644245094'
+          },
+          'atlas.rest.address': {
+            'delete': 'true'
+          }
+        }
+      },
+      'hive-interactive-site': {
+        'properties': {}
+      },
+      'yarn-site': {
+        'properties': {
+          'hadoop.registry.rm.enabled': 'false',
+          'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
+          'yarn.scheduler.minimum-allocation-vcores': '1',
+          'yarn.scheduler.maximum-allocation-vcores': '4',
+          'yarn.nodemanager.resource.memory-mb': '768',
+          'yarn.scheduler.minimum-allocation-mb': '256',
+          'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath': '',
+          'yarn.nodemanager.resource.cpu-vcores': '4',
+          'yarn.scheduler.maximum-allocation-mb': '768',
+          'yarn.nodemanager.linux-container-executor.group': 'hadoop'
+        },
+        'property_attributes': {
+          'yarn.authorization-provider': {
+            'delete': 'true'
+          }
+        }
+      }
+    }
+
+    self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations,expected)
+    configurations['hive-env']['properties']['hive_user'] = 'hive'
+    expected['hive-env']['properties']['hive_user'] = 'hive'
+    expected['ranger-hive-plugin-properties']['properties']['REPOSITORY_CONFIG_USERNAME'] = 'hive'
+    self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations,expected)
+
+
+  def test_recommendHBASEConfigurations(self):
+    configurations = {
+      "ranger-hbase-plugin-properties": {
+        "properties": {
+          "ranger-hbase-plugin-enabled": "Yes",
+          "REPOSITORY_CONFIG_USERNAME":"hbase"
+        }
+      },
+      "hbase-env":{
+        "properties":{
+          "hbase_user":"custom_hbase"
+        }
+      }
+    }
+
+    services = {
+      "services": [{
+            "StackServices": {
+              "service_name" : "HBASE",
+              "service_version" : "1.1.2.2.6"
+            },
+            "components": [
+            ]
+          }
+        ],
+      "Versions": {
+        "stack_name" : "HDP",
+        "stack_version": "2.6"
+      },
+      "configurations": configurations,
+      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
+    }
+
+
+    clusterData = {
+      "totalAvailableRam": 2048,
+      "hBaseInstalled": True,
+      "hbaseRam": 112,
+      "reservedRam": 128
+    }
+    expected = {
+      'hbase-site': {
+        'properties': {
+          'hbase.regionserver.wal.codec': 'org.apache.hadoop.hbase.regionserver.wal.WALCellCodec',
+          'hbase.master.ui.readonly': 'false',
+          'hbase.security.authorization': 'true',
+          'hbase.bucketcache.percentage.in.combinedcache': '1.0000',
+          'hbase.regionserver.global.memstore.size': '0.4',
+          'hfile.block.cache.size': '0.4',
+          'hbase.coprocessor.region.classes': 'org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor',
+          'hbase.bucketcache.size': '92160',
+          'hbase.coprocessor.regionserver.classes': 'org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor',
+          'hbase.coprocessor.master.classes': 'org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor',
+          'hbase.bucketcache.ioengine': 'offheap'
+        },
+        'property_attributes': {
+          'hbase.bucketcache.percentage.in.combinedcache': {
+            'delete': 'true'
+          },
+          'hbase.region.server.rpc.scheduler.factory.class': {
+            'delete': 'true'
+          }
+        }
+      },
+      'ranger-hbase-plugin-properties': {
+        'properties': {
+          'REPOSITORY_CONFIG_USERNAME': 'custom_hbase',
+          'ranger-hbase-plugin-enabled': 'Yes'
+        }
+      },
+      'hbase-env': {
+        'properties': {
+          'hbase_user': 'custom_hbase',
+          'hbase_master_heapsize': '1024',
+          'hbase_regionserver_heapsize': '20480',
+          'hbase_max_direct_memory_size': '94208'
+        }
+      },
+      'core-site': {
+        'properties': {}
+      }
+    }
+    self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations, expected)
+    configurations['hbase-env']['properties']['hbase_user'] = 'hbase'
+    expected['hbase-env']['properties']['hbase_user'] = 'hbase'
+    expected['ranger-hbase-plugin-properties']['properties']['REPOSITORY_CONFIG_USERNAME'] = 'hbase'
+    self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations, expected)
+
+  def test_recommendYARNConfigurations(self):
+    configurations = {
+      "yarn-env": {
+        "properties": {
+          "yarn_user" : "custom_yarn"
+        }
+      },
+      "ranger-yarn-plugin-properties": {
+        "properties": {
+          "ranger-yarn-plugin-enabled" : "Yes",
+          "REPOSITORY_CONFIG_USERNAME":"yarn"
+        }
+      }
+    }
+    services = {
+      "services" : [{
+        "StackServices": {
+          "service_name" : "YARN",
+          "service_version" : "2.7.3.2.6"
+        },
+        "components": []
+      }
+      ],
+      "configurations": configurations
+    }
+
+
+    clusterData = {
+      "cpu": 4,
+      "containers" : 5,
+      "ramPerContainer": 256,
+      "yarnMinContainerSize": 256
+    }
+    expected = {
+      'yarn-env': {
+        'properties': {
+          'yarn_user': 'custom_yarn',
+          'service_check.queue.name': 'default',
+          'min_user_id': '500'
+        }
+      },
+      'ranger-yarn-plugin-properties': {
+        'properties': {
+          'ranger-yarn-plugin-enabled': 'Yes',
+          'REPOSITORY_CONFIG_USERNAME': 'custom_yarn'
+        }
+      },
+      'yarn-site': {
+        'properties': {
+          'hadoop.registry.rm.enabled': 'false',
+          'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
+          'yarn.authorization-provider': 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer',
+          'yarn.acl.enable': 'true',
+          'yarn.scheduler.minimum-allocation-vcores': '1',
+          'yarn.scheduler.maximum-allocation-vcores': '4',
+          'yarn.nodemanager.resource.memory-mb': '1280',
+          'yarn.scheduler.minimum-allocation-mb': '256',
+          'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath': '',
+          'yarn.nodemanager.resource.cpu-vcores': '4',
+          'yarn.scheduler.maximum-allocation-mb': '1280',
+          'yarn.nodemanager.linux-container-executor.group': 'hadoop'
+        }
+      }
+    }
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations, expected)
+    configurations['yarn-env']['properties']['yarn_user'] = 'yarn'
+    expected['yarn-env']['properties']['yarn_user'] = 'yarn'
+    expected['ranger-yarn-plugin-properties']['properties']['REPOSITORY_CONFIG_USERNAME'] = 'yarn'
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations, expected)
+
+  def test_recommendKAFKAConfigurations(self):
+    configurations = {
+      "kafka-env": {
+        "properties": {
+          "kafka_user" : "custom_kafka"
+        }
+      },
+      "ranger-kafka-plugin-properties": {
+        "properties": {
+          "ranger-kafka-plugin-enabled" : "Yes",
+          "REPOSITORY_CONFIG_USERNAME":"kafka"
+        }
+      }
+    }
+    clusterData = []
+    services = {
+      "services" : [{
+        "StackServices": {
+          "service_name" : "KAFKA",
+          "service_version" : "0.10.0.2.6"
+        },
+        "components": []
+      }
+      ],
+      "configurations": configurations
+    }
+
+    expected = {
+      'kafka-env': {
+        'properties': {
+          'kafka_user': 'custom_kafka'
+        }
+      },
+      'kafka-log4j': {
+        'properties': {}
+      },
+      'kafka-broker': {
+        'properties': {},
+        'property_attributes': {
+          'principal.to.local.class': {
+            'delete': 'true'
+          },
+          'super.users': {
+            'delete': 'true'
+          },
+          'security.inter.broker.protocol': {
+            'delete': 'true'
+          },
+          'authorizer.class.name': {
+            'delete': 'true'
+          }
+        }
+      },
+      'ranger-kafka-plugin-properties': {
+        'properties': {
+          'ranger-kafka-plugin-enabled': 'Yes',
+          'REPOSITORY_CONFIG_USERNAME': 'custom_kafka'
+        }
+      }
+    }
+
+    self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations, expected)
+    configurations['kafka-env']['properties']['kafka_user'] = 'kafka'
+    expected['kafka-env']['properties']['kafka_user'] = 'kafka'
+    expected['ranger-kafka-plugin-properties']['properties']['REPOSITORY_CONFIG_USERNAME'] = 'kafka'
+    self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations, expected)
+
 def load_json(self, filename):
   file = os.path.join(self.testDirectory, filename)
   with open(file, 'rb') as f: