You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by sr...@apache.org on 2016/06/10 22:38:57 UTC

ambari git commit: AMBARI-17164. Handle Ranger upgrade scenario in Kerberized env (Mugdha Varadkar via srimanth)

Repository: ambari
Updated Branches:
  refs/heads/branch-2.4 defe34218 -> 8e3ddf6a8


AMBARI-17164. Handle Ranger upgrade scenario in Kerberized env (Mugdha Varadkar via srimanth)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8e3ddf6a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8e3ddf6a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8e3ddf6a

Branch: refs/heads/branch-2.4
Commit: 8e3ddf6a817cfb0cd83d852ac5808cd6a8c6532b
Parents: defe342
Author: Srimanth Gunturi <sg...@hortonworks.com>
Authored: Fri Jun 10 15:37:15 2016 -0700
Committer: Srimanth Gunturi <sg...@hortonworks.com>
Committed: Fri Jun 10 15:37:15 2016 -0700

----------------------------------------------------------------------
 .../RangerKerberosConfigCalculation.java        | 258 ++++++++++++++++++
 .../0.6.0/configuration/ranger-admin-site.xml   | 108 ++++++++
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml |   4 +
 .../stacks/HDP/2.3/upgrades/upgrade-2.5.xml     |   2 +
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml |   4 +
 .../stacks/HDP/2.4/upgrades/upgrade-2.5.xml     |   2 +
 .../stacks/HDP/2.5/services/stack_advisor.py    |  21 ++
 .../RangerKerberosConfigCalculationTest.java    | 259 +++++++++++++++++++
 8 files changed, 658 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8e3ddf6a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
new file mode 100644
index 0000000..c3d71c0
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
@@ -0,0 +1,258 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.serveraction.upgrades;
+
+import java.text.MessageFormat;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.serveraction.AbstractServerAction;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.SecurityType;
+
+import com.google.inject.Inject;
+
+/**
+* Computes Ranger properties when upgrading to HDP-2.5
+*/
+
+public class RangerKerberosConfigCalculation extends AbstractServerAction {
+  private static final String RANGER_ADMIN_SITE_CONFIG_TYPE = "ranger-admin-site";
+  private static final String HADOOP_ENV_CONFIG_TYPE = "hadoop-env";
+  private static final String HIVE_ENV_CONFIG_TYPE = "hive-env";
+  private static final String YARN_ENV_CONFIG_TYPE = "yarn-env";
+  private static final String HBASE_ENV_CONFIG_TYPE = "hbase-env";
+  private static final String KNOX_ENV_CONFIG_TYPE = "knox-env";
+  private static final String STORM_ENV_CONFIG_TYPE = "storm-env";
+  private static final String KAFKA_ENV_CONFIG_TYPE = "kafka-env";
+  private static final String RANGER_KMS_ENV_CONFIG_TYPE = "kms-env";
+  private static final String HDFS_SITE_CONFIG_TYPE = "hdfs-site";
+  private static final String RANGER_SPNEGO_PRINCIPAL = "ranger.spnego.kerberos.principal";
+  private static final String RANGER_SPNEGO_KEYTAB = "ranger.spnego.kerberos.keytab";
+  private static final String RANGER_PLUGINS_HDFS_SERVICE_USER = "ranger.plugins.hdfs.serviceuser";
+  private static final String RANGER_PLUGINS_HIVE_SERVICE_USER = "ranger.plugins.hive.serviceuser";
+  private static final String RANGER_PLUGINS_YARN_SERVICE_USER = "ranger.plugins.yarn.serviceuser";
+  private static final String RANGER_PLUGINS_HBASE_SERVICE_USER = "ranger.plugins.hbase.serviceuser";
+  private static final String RANGER_PLUGINS_KNOX_SERVICE_USER = "ranger.plugins.knox.serviceuser";
+  private static final String RANGER_PLUGINS_STORM_SERVICE_USER = "ranger.plugins.storm.serviceuser";
+  private static final String RANGER_PLUGINS_KAFKA_SERVICE_USER = "ranger.plugins.kafka.serviceuser";
+  private static final String RANGER_PLUGINS_KMS_SERVICE_USER = "ranger.plugins.kms.serviceuser";
+
+  @Inject
+  private Clusters m_clusters;
+
+  @Override
+  public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
+      throws AmbariException, InterruptedException {
+
+    String clusterName = getExecutionCommand().getClusterName();
+    Cluster cluster = m_clusters.getCluster(clusterName);
+    String errMsg = "";
+    String sucessMsg = "";
+
+    Config rangerAdminconfig = cluster.getDesiredConfigByType(RANGER_ADMIN_SITE_CONFIG_TYPE);
+
+    if (null == rangerAdminconfig) {
+      return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
+        MessageFormat.format("The {0} configuration was not found; unable to set Ranger configuration properties", RANGER_ADMIN_SITE_CONFIG_TYPE), "");
+    }
+
+    Map<String, String> targetValues = rangerAdminconfig.getProperties();
+
+    // For Hdfs
+    Config hadoopConfig = cluster.getDesiredConfigByType(HADOOP_ENV_CONFIG_TYPE);
+
+    if (null != hadoopConfig) {
+      String hadoopUser = hadoopConfig.getProperties().get("hdfs_user");
+      if (null != hadoopUser) {
+        targetValues.put(RANGER_PLUGINS_HDFS_SERVICE_USER, hadoopUser);
+        rangerAdminconfig.setProperties(targetValues);
+        rangerAdminconfig.persist(false);
+        sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HDFS_SERVICE_USER);
+      } else {
+        errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hdfs_user", HADOOP_ENV_CONFIG_TYPE);
+      }
+    } else {
+      errMsg = errMsg + MessageFormat.format("{0} not found\n", HADOOP_ENV_CONFIG_TYPE);
+    }
+
+    // For Hive
+    Config hiveConfig = cluster.getDesiredConfigByType(HIVE_ENV_CONFIG_TYPE);
+
+    if (null != hiveConfig) {
+      String hiveUser = hiveConfig.getProperties().get("hive_user");
+      if (null != hiveUser) {
+        targetValues.put(RANGER_PLUGINS_HIVE_SERVICE_USER, hiveUser);
+        rangerAdminconfig.setProperties(targetValues);
+        rangerAdminconfig.persist(false);
+        sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HIVE_SERVICE_USER);
+      } else {
+        errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hive_user", HIVE_ENV_CONFIG_TYPE);
+      }
+    } else {
+      errMsg = errMsg + MessageFormat.format("{0} not found\n", HIVE_ENV_CONFIG_TYPE);
+    }
+
+    // For Yarn
+    Config yarnConfig = cluster.getDesiredConfigByType(YARN_ENV_CONFIG_TYPE);
+
+    if (null != yarnConfig) {
+      String yarnUser = yarnConfig.getProperties().get("yarn_user");
+      if (null != yarnUser) {
+        targetValues.put(RANGER_PLUGINS_YARN_SERVICE_USER, yarnUser);
+        rangerAdminconfig.setProperties(targetValues);
+        rangerAdminconfig.persist(false);
+        sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_YARN_SERVICE_USER);
+      } else {
+        errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "yarn_user", YARN_ENV_CONFIG_TYPE);
+      }
+    } else {
+      errMsg = errMsg + MessageFormat.format("{0} not found\n", YARN_ENV_CONFIG_TYPE);
+    }
+
+    // For Hbase
+    Config hbaseConfig = cluster.getDesiredConfigByType(HBASE_ENV_CONFIG_TYPE);
+
+    if (null != hbaseConfig) {
+      String hbaseUser = hbaseConfig.getProperties().get("hbase_user");
+      if (null != hbaseUser) {
+        targetValues.put(RANGER_PLUGINS_HBASE_SERVICE_USER, hbaseUser);
+        rangerAdminconfig.setProperties(targetValues);
+        rangerAdminconfig.persist(false);
+        sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HBASE_SERVICE_USER);
+      } else {
+        errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hbase_user", HBASE_ENV_CONFIG_TYPE);
+      }
+    } else {
+      errMsg = errMsg + MessageFormat.format("{0} not found\n", HBASE_ENV_CONFIG_TYPE);
+    }
+
+    // For Knox
+    Config knoxConfig = cluster.getDesiredConfigByType(KNOX_ENV_CONFIG_TYPE);
+
+    if (null != knoxConfig) {
+      String knoxUser = knoxConfig.getProperties().get("knox_user");
+      if (null != knoxUser) {
+        targetValues.put(RANGER_PLUGINS_KNOX_SERVICE_USER, knoxUser);
+        rangerAdminconfig.setProperties(targetValues);
+        rangerAdminconfig.persist(false);
+        sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KNOX_SERVICE_USER);
+      } else {
+        errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "knox_user", KNOX_ENV_CONFIG_TYPE);
+      }
+    } else {
+      errMsg = errMsg + MessageFormat.format("{0} not found\n", KNOX_ENV_CONFIG_TYPE);
+    }
+
+    // For Storm
+    Config stormConfig = cluster.getDesiredConfigByType(STORM_ENV_CONFIG_TYPE);
+
+    if (null != stormConfig) {
+      String stormUser = stormConfig.getProperties().get("storm_user");
+      if (null != stormUser) {
+        targetValues.put(RANGER_PLUGINS_STORM_SERVICE_USER, stormUser);
+        rangerAdminconfig.setProperties(targetValues);
+        rangerAdminconfig.persist(false);
+        sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_STORM_SERVICE_USER);
+      } else {
+        errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "storm_user", STORM_ENV_CONFIG_TYPE);
+      }
+    } else {
+      errMsg = errMsg + MessageFormat.format("{0} not found\n", STORM_ENV_CONFIG_TYPE);
+    }
+
+    // For Kafka
+    Config kafkaConfig = cluster.getDesiredConfigByType(KAFKA_ENV_CONFIG_TYPE);
+
+    if (null != kafkaConfig) {
+      String kafkaUser = kafkaConfig.getProperties().get("kafka_user");
+      if (null != kafkaUser) {
+        targetValues.put(RANGER_PLUGINS_KAFKA_SERVICE_USER, kafkaUser);
+        rangerAdminconfig.setProperties(targetValues);
+        rangerAdminconfig.persist(false);
+        sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KAFKA_SERVICE_USER);
+      } else {
+        errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "kafka_user", KAFKA_ENV_CONFIG_TYPE);
+      }
+    } else {
+      errMsg = errMsg + MessageFormat.format("{0} not found\n", KAFKA_ENV_CONFIG_TYPE);
+    }
+
+    // For Ranger Kms
+    Config rangerKmsConfig = cluster.getDesiredConfigByType(RANGER_KMS_ENV_CONFIG_TYPE);
+
+    if (null != rangerKmsConfig) {
+      String rangerKmsUser = rangerKmsConfig.getProperties().get("kms_user");
+      if (null != rangerKmsUser) {
+        targetValues.put(RANGER_PLUGINS_KMS_SERVICE_USER, rangerKmsUser);
+        rangerAdminconfig.setProperties(targetValues);
+        rangerAdminconfig.persist(false);
+        sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KMS_SERVICE_USER);
+      } else {
+        errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "kms_user", RANGER_KMS_ENV_CONFIG_TYPE);
+      }
+    } else {
+      errMsg = errMsg + MessageFormat.format("{0} not found\n", RANGER_KMS_ENV_CONFIG_TYPE);
+    }
+
+    // Set spnego principal
+    if (cluster.getSecurityType() == SecurityType.KERBEROS) {
+      Config hdfsSiteConfig = cluster.getDesiredConfigByType(HDFS_SITE_CONFIG_TYPE);
+
+      if (null != hdfsSiteConfig) {
+        String spnegoPrincipal = hdfsSiteConfig.getProperties().get("dfs.web.authentication.kerberos.principal");
+        String spnegoKeytab = hdfsSiteConfig.getProperties().get("dfs.web.authentication.kerberos.keytab");
+
+        if (null != spnegoPrincipal) {
+          targetValues.put(RANGER_SPNEGO_PRINCIPAL, spnegoPrincipal);
+          rangerAdminconfig.setProperties(targetValues);
+          rangerAdminconfig.persist(false);
+          sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_SPNEGO_PRINCIPAL);
+        } else {
+          errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "dfs.web.authentication.kerberos.principal", HDFS_SITE_CONFIG_TYPE);          
+        }
+
+        if (null != spnegoKeytab) {
+          targetValues.put(RANGER_SPNEGO_KEYTAB, spnegoKeytab);
+          rangerAdminconfig.setProperties(targetValues);
+          rangerAdminconfig.persist(false);
+          sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_SPNEGO_KEYTAB);
+        } else {
+          errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "dfs.web.authentication.kerberos.keytab", HDFS_SITE_CONFIG_TYPE);          
+        }
+
+      } else {
+        errMsg = errMsg + MessageFormat.format("{0} not found \n", HDFS_SITE_CONFIG_TYPE);
+      }
+
+    }
+
+    String outputMsg = MessageFormat.format("Successfully set {0} properties in {1}", sucessMsg, RANGER_ADMIN_SITE_CONFIG_TYPE);
+
+    if(!errMsg.equalsIgnoreCase("")) {
+      outputMsg = outputMsg + MessageFormat.format("\n {0}", errMsg, RANGER_ADMIN_SITE_CONFIG_TYPE);
+    }
+
+    return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", outputMsg, "");
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/8e3ddf6a/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/ranger-admin-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/ranger-admin-site.xml b/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/ranger-admin-site.xml
index 35910ee..2ba3794 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/ranger-admin-site.xml
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/ranger-admin-site.xml
@@ -211,4 +211,112 @@
     </depends-on>
     <on-ambari-upgrade add="true"/>
   </property>
+
+  <property>
+    <name>ranger.plugins.hdfs.serviceuser</name>
+    <value>hdfs</value>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>hdfs_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.hive.serviceuser</name>
+    <value>hive</value>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.hbase.serviceuser</name>
+    <value>hbase</value>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>hbase_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.yarn.serviceuser</name>
+    <value>yarn</value>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.knox.serviceuser</name>
+    <value>knox</value>
+    <depends-on>
+      <property>
+        <type>knox-env</type>
+        <name>knox_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.storm.serviceuser</name>
+    <value>storm</value>
+    <depends-on>
+      <property>
+        <type>storm-env</type>
+        <name>storm_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.kafka.serviceuser</name>
+    <value>kafka</value>
+    <depends-on>
+      <property>
+        <type>kafka-env</type>
+        <name>kafka_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.atlas.serviceuser</name>
+    <value>atlas</value>
+    <depends-on>
+      <property>
+        <type>atlas-env</type>
+        <name>metadata_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.kms.serviceuser</name>
+    <value>kms</value>
+    <depends-on>
+      <property>
+        <type>kms-env</type>
+        <name>kms_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8e3ddf6a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
index f40f760..464c444 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
@@ -379,6 +379,10 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_sso_property"/>
       </execute-stage>
 
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Calculating Ranger Properties">
+        <task xsi:type="server_action" summary="Calculating Ranger Properties" class="org.apache.ambari.server.serveraction.upgrades.RangerKerberosConfigCalculation"/>
+      </execute-stage>
+
       <!-- RANGER KMS -->
       <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger KMS Server">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8e3ddf6a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
index ea5ff5a..dd04b64 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
@@ -447,6 +447,8 @@
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_ranger_admin_site" />
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_sso_property" />
 
+          <task xsi:type="server_action" summary="Calculating Ranger Properties" class="org.apache.ambari.server.serveraction.upgrades.RangerKerberosConfigCalculation"/>
+
           <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading Ranger database schema">
             <script>scripts/ranger_admin.py</script>
             <function>setup_ranger_database</function>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8e3ddf6a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
index 7fb03dc..27461e8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
@@ -318,6 +318,10 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_sso_property"/>
       </execute-stage>
 
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Calculating Ranger Properties">
+        <task xsi:type="server_action" summary="Calculating Ranger Properties" class="org.apache.ambari.server.serveraction.upgrades.RangerKerberosConfigCalculation"/>
+      </execute-stage>
+
       <!-- HDFS -->
       <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for Hdfs Namenode">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8e3ddf6a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
index 7f988e3..ad1bc34 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
@@ -442,6 +442,8 @@
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_ranger_admin_site" />
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_sso_property" />
 
+          <task xsi:type="server_action" summary="Calculating Ranger Properties" class="org.apache.ambari.server.serveraction.upgrades.RangerKerberosConfigCalculation"/>
+
           <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading Ranger database schema">
             <script>scripts/ranger_admin.py</script>
             <function>setup_ranger_database</function>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8e3ddf6a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 2a77017..9c2c08d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -1310,6 +1310,27 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
         hive_user = services["configurations"]["hive-env"]["properties"]["hive_user"]
         putRangerAdminProperty('ranger.kms.service.user.hive', hive_user)
 
+    ranger_plugins_serviceuser = [
+      {'service_name': 'HDFS', 'file_name': 'hadoop-env', 'config_name': 'hdfs_user', 'target_configname': 'ranger.plugins.hdfs.serviceuser'},
+      {'service_name': 'HIVE', 'file_name': 'hive-env', 'config_name': 'hive_user', 'target_configname': 'ranger.plugins.hive.serviceuser'},
+      {'service_name': 'YARN', 'file_name': 'yarn-env', 'config_name': 'yarn_user', 'target_configname': 'ranger.plugins.yarn.serviceuser'},
+      {'service_name': 'HBASE', 'file_name': 'hbase-env', 'config_name': 'hbase_user', 'target_configname': 'ranger.plugins.hbase.serviceuser'},
+      {'service_name': 'KNOX', 'file_name': 'knox-env', 'config_name': 'knox_user', 'target_configname': 'ranger.plugins.knox.serviceuser'},
+      {'service_name': 'STORM', 'file_name': 'storm-env', 'config_name': 'storm_user', 'target_configname': 'ranger.plugins.storm.serviceuser'},
+      {'service_name': 'KAFKA', 'file_name': 'kafka-env', 'config_name': 'kafka_user', 'target_configname': 'ranger.plugins.kafka.serviceuser'},
+      {'service_name': 'RANGER_KMS', 'file_name': 'kms-env', 'config_name': 'kms_user', 'target_configname': 'ranger.plugins.kms.serviceuser'},
+      {'service_name': 'ATLAS', 'file_name': 'atlas-env', 'config_name': 'metadata_user', 'target_configname': 'ranger.plugins.atlas.serviceuser'}
+    ]
+
+    for item in range(len(ranger_plugins_serviceuser)):
+      if ranger_plugins_serviceuser[item]['service_name'] in servicesList:
+        file_name = ranger_plugins_serviceuser[item]['file_name']
+        config_name = ranger_plugins_serviceuser[item]['config_name']
+        target_configname = ranger_plugins_serviceuser[item]['target_configname']
+        if file_name in services["configurations"] and config_name in services["configurations"][file_name]["properties"]:
+          service_user = services["configurations"][file_name]["properties"][config_name]
+          putRangerAdminProperty(target_configname, service_user)
+
   def validateRangerTagsyncConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     ranger_tagsync_properties = getSiteProperties(configurations, "ranger-tagsync-site")
     validationItems = []

http://git-wip-us.apache.org/repos/asf/ambari/blob/8e3ddf6a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
new file mode 100644
index 0000000..133a9e3
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.serveraction.upgrades;
+
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
+import org.apache.ambari.server.actionmanager.HostRoleCommand;
+import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigImpl;
+import org.easymock.EasyMock;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.inject.Injector;
+
+/**
+ * Tests upgrade-related server side actions
+*/
+
+public class RangerKerberosConfigCalculationTest {
+
+  private Injector m_injector;
+  private Clusters m_clusters;
+  private Field m_clusterField;
+
+  @Before
+  public void setup() throws Exception {
+    m_injector = EasyMock.createMock(Injector.class);
+    m_clusters = EasyMock.createMock(Clusters.class);
+    Cluster cluster = EasyMock.createMock(Cluster.class);
+
+    Config hadoopConfig = new ConfigImpl("hadoop-env") {
+      Map<String, String> mockProperties = new HashMap<String, String>() {{
+        put("hdfs_user", "hdfs");
+      }};
+
+      @Override
+      public Map<String, String> getProperties() {
+        return mockProperties;
+      }
+    };
+
+
+    Config hiveConfig = new ConfigImpl("hive-env") {
+      Map<String, String> mockProperties = new HashMap<String, String>() {{
+        put("hive_user", "hive");
+      }};
+
+      @Override
+      public Map<String, String> getProperties() {
+        return mockProperties;
+      }
+    };
+
+    Config yarnConfig = new ConfigImpl("yarn-env") {
+      Map<String, String> mockProperties = new HashMap<String, String>() {{
+        put("yarn_user", "yarn");
+      }};
+
+      @Override
+      public Map<String, String> getProperties() {
+        return mockProperties;
+      }
+    };
+
+    Config hbaseConfig = new ConfigImpl("hbase-env") {
+      Map<String, String> mockProperties = new HashMap<String, String>() {{
+        put("hbase_user", "hbase");
+      }};
+
+      @Override
+      public Map<String, String> getProperties() {
+        return mockProperties;
+      }
+    };
+
+    Config knoxConfig = new ConfigImpl("knox-env") {
+      Map<String, String> mockProperties = new HashMap<String, String>() {{
+        put("knox_user", "knox");
+      }};
+
+      @Override
+      public Map<String, String> getProperties() {
+        return mockProperties;
+      }
+    };
+
+    Config stormConfig = new ConfigImpl("storm-env") {
+      Map<String, String> mockProperties = new HashMap<String, String>() {{
+        put("storm_user", "storm");
+      }};
+
+      @Override
+      public Map<String, String> getProperties() {
+        return mockProperties;
+      }
+    };
+
+    Config kafkaConfig = new ConfigImpl("kafka-env") {
+      Map<String, String> mockProperties = new HashMap<String, String>() {{
+        put("kafka_user", "kafka");
+      }};
+
+      @Override
+      public Map<String, String> getProperties() {
+        return mockProperties;
+      }
+    };
+
+    Config kmsConfig = new ConfigImpl("kms-env") {
+      Map<String, String> mockProperties = new HashMap<String, String>() {{
+        put("kms_user", "kms");
+      }};
+
+      @Override
+      public Map<String, String> getProperties() {
+        return mockProperties;
+      }
+    };
+
+    Config hdfsSiteConfig = new ConfigImpl("hdfs-site") {
+      Map<String, String> mockProperties = new HashMap<String, String>() {{
+        put("dfs.web.authentication.kerberos.principal", "HTTP/_HOST.COM");
+        put("dfs.web.authentication.kerberos.keytab", "/etc/security/keytabs/spnego.kytab");
+      }};
+
+      @Override
+      public Map<String, String> getProperties() {
+        return mockProperties;
+      }
+    };
+
+    Config adminSiteConfig = new ConfigImpl("ranger-admin-site") {
+      Map<String, String> mockProperties = new HashMap<String, String>();
+      @Override
+      public Map<String, String> getProperties() {
+        return mockProperties;
+      }
+
+      @Override
+      public void setProperties(Map<String, String> properties) {
+        mockProperties.putAll(properties);
+      }
+
+      @Override
+      public void persist(boolean newConfig) {
+        // no-op
+      }
+    };
+
+    expect(cluster.getDesiredConfigByType("hadoop-env")).andReturn(hadoopConfig).atLeastOnce();
+    expect(cluster.getDesiredConfigByType("hive-env")).andReturn(hiveConfig).atLeastOnce();
+    expect(cluster.getDesiredConfigByType("yarn-env")).andReturn(yarnConfig).atLeastOnce();
+    expect(cluster.getDesiredConfigByType("hbase-env")).andReturn(hbaseConfig).atLeastOnce();
+    expect(cluster.getDesiredConfigByType("knox-env")).andReturn(knoxConfig).atLeastOnce();
+    expect(cluster.getDesiredConfigByType("storm-env")).andReturn(stormConfig).atLeastOnce();
+    expect(cluster.getDesiredConfigByType("kafka-env")).andReturn(kafkaConfig).atLeastOnce();
+    expect(cluster.getDesiredConfigByType("kms-env")).andReturn(kmsConfig).atLeastOnce();
+    expect(cluster.getDesiredConfigByType("hdfs-site")).andReturn(hdfsSiteConfig).atLeastOnce();
+    expect(cluster.getDesiredConfigByType("ranger-admin-site")).andReturn(adminSiteConfig).atLeastOnce();
+
+    expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
+    expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
+    expect(cluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes();
+
+    replay(m_injector, m_clusters, cluster);
+
+    m_clusterField = RangerKerberosConfigCalculation.class.getDeclaredField("m_clusters");
+    m_clusterField.setAccessible(true);
+  }
+
+  @Test
+  public void testAction() throws Exception {
+
+    // Command parameters pointing the server action at the mocked cluster "c1".
+    Map<String, String> commandParams = new HashMap<String, String>();
+    commandParams.put("clusterName", "c1");
+
+    ExecutionCommand executionCommand = new ExecutionCommand();
+    executionCommand.setCommandParams(commandParams);
+    executionCommand.setClusterName("c1");
+
+    // Minimal HostRoleCommand mock: only request/stage ids and the wrapped
+    // execution command are read by the action under test.
+    HostRoleCommand hrc = EasyMock.createMock(HostRoleCommand.class);
+    expect(hrc.getRequestId()).andReturn(1L).anyTimes();
+    expect(hrc.getStageId()).andReturn(2L).anyTimes();
+    expect(hrc.getExecutionCommandWrapper()).andReturn(new ExecutionCommandWrapper(executionCommand)).anyTimes();
+    replay(hrc);
+
+    // Inject the mocked Clusters instance via the reflective field prepared in setup.
+    RangerKerberosConfigCalculation action = new RangerKerberosConfigCalculation();
+    m_clusterField.set(action, m_clusters);
+
+    action.setExecutionCommand(executionCommand);
+    action.setHostRoleCommand(hrc);
+
+    // Execute the upgrade action; it should produce a non-null command report.
+    CommandReport report = action.execute(null);
+    assertNotNull(report);
+
+    // The action is expected to have written the computed properties into
+    // the cluster's desired ranger-admin-site config.
+    Cluster c = m_clusters.getCluster("c1");
+    Config config = c.getDesiredConfigByType("ranger-admin-site");
+    Map<String, String> map = config.getProperties();
+
+    // Per-plugin service-user keys plus the SPNEGO principal/keytab must all be set.
+    assertTrue(map.containsKey("ranger.plugins.hdfs.serviceuser"));
+    assertTrue(map.containsKey("ranger.plugins.hive.serviceuser"));
+    assertTrue(map.containsKey("ranger.plugins.yarn.serviceuser"));
+    assertTrue(map.containsKey("ranger.plugins.hbase.serviceuser"));
+    assertTrue(map.containsKey("ranger.plugins.knox.serviceuser"));
+    assertTrue(map.containsKey("ranger.plugins.storm.serviceuser"));
+    assertTrue(map.containsKey("ranger.plugins.kafka.serviceuser"));
+    assertTrue(map.containsKey("ranger.plugins.kms.serviceuser"));
+    assertTrue(map.containsKey("ranger.spnego.kerberos.principal"));
+    assertTrue(map.containsKey("ranger.spnego.kerberos.keytab"));    
+
+
+    // Values must match the user names / SPNEGO settings stubbed into the
+    // *-env and hdfs-site configs by the fixture setup.
+    assertEquals("hdfs", map.get("ranger.plugins.hdfs.serviceuser"));
+    assertEquals("hive", map.get("ranger.plugins.hive.serviceuser"));
+    assertEquals("yarn", map.get("ranger.plugins.yarn.serviceuser"));
+    assertEquals("hbase", map.get("ranger.plugins.hbase.serviceuser"));
+    assertEquals("knox", map.get("ranger.plugins.knox.serviceuser"));
+    assertEquals("storm", map.get("ranger.plugins.storm.serviceuser"));
+    assertEquals("kafka", map.get("ranger.plugins.kafka.serviceuser"));
+    assertEquals("kms", map.get("ranger.plugins.kms.serviceuser"));
+    assertEquals("HTTP/_HOST.COM", map.get("ranger.spnego.kerberos.principal"));
+    assertEquals("/etc/security/keytabs/spnego.kytab", map.get("ranger.spnego.kerberos.keytab"));
+
+    // Running the action a second time should also succeed (idempotent re-execution).
+    report = action.execute(null);
+    assertNotNull(report);
+
+  }
+
+}