Posted to commits@ambari.apache.org by vb...@apache.org on 2017/06/15 21:33:23 UTC

ambari git commit: AMBARI-21148. Add a flag to indicate NN restart is rolling. (vbrodetskyi)

Repository: ambari
Updated Branches:
  refs/heads/trunk bd918763c -> 8873e9928


AMBARI-21148. Add a flag to indicate NN restart is rolling. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8873e992
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8873e992
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8873e992

Branch: refs/heads/trunk
Commit: 8873e99288bfb929a0e279b8366e1bfb135a5f91
Parents: bd91876
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Fri Jun 16 00:32:13 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Fri Jun 16 00:32:13 2017 +0300

----------------------------------------------------------------------
 .../AmbariCustomCommandExecutionHelper.java     | 26 +++++++++++++++-----
 .../AmbariManagementControllerImpl.java         |  2 +-
 .../internal/UpgradeResourceProvider.java       |  8 +++---
 .../apache/ambari/server/utils/StageUtils.java  |  8 ++++--
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  | 10 +++++---
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  |  2 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |  4 ++-
 .../3.0.0.3.0/package/scripts/hdfs_namenode.py  | 10 +++++---
 .../HDFS/3.0.0.3.0/package/scripts/namenode.py  |  2 +-
 .../3.0.0.3.0/package/scripts/params_linux.py   |  2 ++
 .../HDP/2.0.6/configuration/cluster-env.xml     | 12 +++++++++
 .../HDP/3.0/configuration/cluster-env.xml       | 12 +++++++++
 12 files changed, 74 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8873e992/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 07f6e30..0473690 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -84,6 +84,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.CommandScriptDefinition;
 import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.CustomCommandDefinition;
 import org.apache.ambari.server.state.DesiredConfig;
@@ -270,8 +271,8 @@ public class AmbariCustomCommandExecutionHelper {
    * @throws AmbariException
    */
   private void addCustomCommandAction(final ActionExecutionContext actionExecutionContext,
-      final RequestResourceFilter resourceFilter, Stage stage,
-      Map<String, String> additionalCommandParams, String commandDetail) throws AmbariException {
+      final RequestResourceFilter resourceFilter, Stage stage, Map<String, String> additionalCommandParams,
+      String commandDetail, Map<String, String> requestParams) throws AmbariException {
     final String serviceName = resourceFilter.getServiceName();
     final String componentName = resourceFilter.getComponentName();
     final String commandName = actionExecutionContext.getActionName();
@@ -479,6 +480,19 @@ public class AmbariCustomCommandExecutionHelper {
         commandTimeout = Math.max(60, commandTimeout);
       }
 
+      if (requestParams != null && requestParams.containsKey("context")) {
+        String requestContext = requestParams.get("context");
+        if (StringUtils.isNotEmpty(requestContext) && requestContext.toLowerCase().contains("rolling-restart")) {
+          Config clusterEnvConfig = cluster.getDesiredConfigByType("cluster-env");
+          if (clusterEnvConfig != null) {
+            String componentRollingRestartTimeout = clusterEnvConfig.getProperties().get("namenode_rolling_restart_timeout");
+            if (StringUtils.isNotEmpty(componentRollingRestartTimeout)) {
+              commandTimeout = Integer.parseInt(componentRollingRestartTimeout);
+            }
+          }
+        }
+      }
+
       commandParams.put(COMMAND_TIMEOUT, "" + commandTimeout);
       commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
       commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
@@ -1038,7 +1052,7 @@ public class AmbariCustomCommandExecutionHelper {
 
       if (!serviceName.equals(Service.Type.HBASE.name()) || hostName.equals(primaryCandidate)) {
         commandParams.put(UPDATE_EXCLUDE_FILE_ONLY, "false");
-        addCustomCommandAction(commandContext, commandFilter, stage, commandParams, commandDetail.toString());
+        addCustomCommandAction(commandContext, commandFilter, stage, commandParams, commandDetail.toString(), null);
       }
     }
   }
@@ -1163,7 +1177,7 @@ public class AmbariCustomCommandExecutionHelper {
           }
         }
 
-        addCustomCommandAction(actionExecutionContext, resourceFilter, stage, extraParams, commandDetail);
+        addCustomCommandAction(actionExecutionContext, resourceFilter, stage, extraParams, commandDetail, requestParams);
       } else {
         throw new AmbariException("Unsupported action " + actionName);
       }
@@ -1392,9 +1406,9 @@ public class AmbariCustomCommandExecutionHelper {
    * @return a wrapper of the important JSON structures to add to a stage
    */
   public ExecuteCommandJson getCommandJson(ActionExecutionContext actionExecContext,
-      Cluster cluster, RepositoryVersionEntity repositoryVersion) throws AmbariException {
+      Cluster cluster, RepositoryVersionEntity repositoryVersion, String requestContext) throws AmbariException {
 
-    Map<String, String> commandParamsStage = StageUtils.getCommandParamsStage(actionExecContext);
+    Map<String, String> commandParamsStage = StageUtils.getCommandParamsStage(actionExecContext, requestContext);
     Map<String, String> hostParamsStage = new HashMap<>();
     Map<String, Set<String>> clusterHostInfo;
     String clusterHostInfoJson = "{}";

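Note on the hunk above: the command timeout is only overridden when the request context mentions "rolling-restart" and cluster-env actually defines namenode_rolling_restart_timeout; otherwise the normal timeout calculation stands. A minimal Python sketch of that decision, with hypothetical helper and parameter names (the authoritative code is the Java above):

    # Sketch only: mirrors the Java decision above; names here are illustrative.
    def effective_command_timeout(default_timeout, request_context, cluster_env):
        """Return the command timeout, overridden for NameNode rolling restarts."""
        if request_context and "rolling-restart" in request_context.lower():
            override = (cluster_env or {}).get("namenode_rolling_restart_timeout")
            if override:
                return int(override)          # e.g. 4200 seconds from cluster-env
        return default_timeout

    # effective_command_timeout(600, "Rolling Restart of NameNodes",
    #                           {"namenode_rolling_restart_timeout": "4200"})  -> 4200
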
http://git-wip-us.apache.org/repos/asf/ambari/blob/8873e992/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 8d262e2..4be9419 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -4108,7 +4108,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
 
     ExecuteCommandJson jsons = customCommandExecutionHelper.getCommandJson(actionExecContext,
-        cluster, desiredRepositoryVersion);
+        cluster, desiredRepositoryVersion, requestContext);
 
     String commandParamsForStage = jsons.getCommandParamsForStage();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/8873e992/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 60665f7..2f6ffc1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -917,7 +917,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     actionContext.setAutoSkipFailures(context.isComponentFailureAutoSkipped());
 
     ExecuteCommandJson jsons = s_commandExecutionHelper.get().getCommandJson(actionContext,
-        cluster, effectiveRepositoryVersion);
+        cluster, effectiveRepositoryVersion, null);
 
     Stage stage = s_stageFactory.get().createNew(request.getId().longValue(), "/tmp/ambari",
         cluster.getClusterName(), cluster.getClusterId(), entity.getText(), jsons.getCommandParamsForStage(),
@@ -999,7 +999,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     actionContext.setMaintenanceModeHostExcluded(true);
 
     ExecuteCommandJson jsons = s_commandExecutionHelper.get().getCommandJson(actionContext,
-        cluster, effectiveRepositoryVersion);
+        cluster, effectiveRepositoryVersion, null);
 
     Stage stage = s_stageFactory.get().createNew(request.getId().longValue(), "/tmp/ambari",
         cluster.getClusterName(), cluster.getClusterId(), entity.getText(), jsons.getCommandParamsForStage(),
@@ -1060,7 +1060,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     actionContext.setMaintenanceModeHostExcluded(true);
 
     ExecuteCommandJson jsons = s_commandExecutionHelper.get().getCommandJson(actionContext,
-        cluster, effectiveRepositoryVersion);
+        cluster, effectiveRepositoryVersion, null);
 
     Stage stage = s_stageFactory.get().createNew(request.getId().longValue(), "/tmp/ambari",
         cluster.getClusterName(), cluster.getClusterId(), entity.getText(), jsons.getCommandParamsForStage(),
@@ -1188,7 +1188,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     actionContext.setMaintenanceModeHostExcluded(true);
 
     ExecuteCommandJson jsons = s_commandExecutionHelper.get().getCommandJson(actionContext,
-        cluster, context.getRepositoryVersion());
+        cluster, context.getRepositoryVersion(), null);
 
     Stage stage = s_stageFactory.get().createNew(request.getId().longValue(), "/tmp/ambari",
         cluster.getClusterName(), cluster.getClusterId(), stageText, jsons.getCommandParamsForStage(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/8873e992/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
index 9930148..e7a94d4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
@@ -260,8 +260,12 @@ public class StageUtils {
     return mapper.readValue(is, clazz);
   }
 
-  public static Map<String, String> getCommandParamsStage(ActionExecutionContext actionExecContext) throws AmbariException {
-    return actionExecContext.getParameters() != null ? actionExecContext.getParameters() : new TreeMap<String, String>();
+  public static Map<String, String> getCommandParamsStage(ActionExecutionContext actionExecContext, String requestContext) throws AmbariException {
+    Map<String, String> commandParams = actionExecContext.getParameters() != null ? actionExecContext.getParameters() : new TreeMap<String, String>();
+    if (StringUtils.isNotEmpty(requestContext) && requestContext.toLowerCase().contains("rolling-restart")) {
+      commandParams.put("rolling_restart", "true");
+    }
+    return commandParams;
   }
 
   public static Map<String, Set<String>> getClusterHostInfo(Cluster cluster) throws AmbariException {

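StageUtils now tags the stage-level command params with rolling_restart=true whenever the request context contains "rolling-restart"; the HDFS scripts pick this flag up from /commandParams, as the params_linux.py hunks below show. Since command-param values travel as strings, the scripts rely on a plain truthiness check ("true" is truthy, the False default is not). A short sketch of the consuming side, using the same resource_management default() helper that params_linux.py already imports:

    # Script-side lookup of the new flag; values under /commandParams are strings.
    from resource_management.libraries.functions.default import default

    rolling_restart = default("/commandParams/rolling_restart", False)   # "true" or False
    if rolling_restart:
        # rolling-restart specific handling, e.g. the longer safemode wait
        # shown in hdfs_namenode.py below
        pass
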
http://git-wip-us.apache.org/repos/asf/ambari/blob/8873e992/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index aa34dc0..139fe98 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -47,7 +47,7 @@ from setup_ranger_hdfs import setup_ranger_hdfs, create_ranger_audit_hdfs_direct
 
 import namenode_upgrade
 
-def wait_for_safemode_off(hdfs_binary, afterwait_sleep=0, execute_kinit=False):
+def wait_for_safemode_off(hdfs_binary, afterwait_sleep=0, execute_kinit=False, retries=115, sleep_seconds=10):
   """
   During NonRolling (aka Express Upgrade), after starting NameNode, which is still in safemode, and then starting
   all of the DataNodes, we need for NameNode to receive all of the block reports and leave safemode.
@@ -55,8 +55,6 @@ def wait_for_safemode_off(hdfs_binary, afterwait_sleep=0, execute_kinit=False):
   """
   import params
 
-  retries = 115
-  sleep_seconds = 10
   sleep_minutes = int(sleep_seconds * retries / 60)
 
   Logger.info("Waiting up to {0} minutes for the NameNode to leave Safemode...".format(sleep_minutes))
@@ -217,7 +215,11 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
 
     # wait for Safemode to end
     if ensure_safemode_off:
-      wait_for_safemode_off(hdfs_binary)
+      if params.rolling_restart and params.rolling_restart_safemode_exit_timeout:
+        calculated_retries = int(params.rolling_restart_safemode_exit_timeout) / 30
+        wait_for_safemode_off(hdfs_binary, afterwait_sleep=30, retries=calculated_retries, sleep_seconds=30)
+      else:
+        wait_for_safemode_off(hdfs_binary)
 
     # Always run this on the "Active" NN unless Safemode has been ignored
     # in the case where safemode was ignored (like during an express upgrade), then

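With the defaults added to cluster-env.xml further down, the rolling-restart branch above polls in 30-second steps: 3600 / 30 = 120 retries, i.e. up to an hour for the NameNode to leave safemode, compared with the previous fixed 115 retries at 10 seconds (roughly 19 minutes). A worked example of that retry math, with values taken from this diff:

    # Worked example of the retry calculation used above (values from the diff).
    rolling_restart_safemode_exit_timeout = "3600"                        # seconds, from cluster-env
    calculated_retries = int(rolling_restart_safemode_exit_timeout) / 30  # 120
    # wait_for_safemode_off(hdfs_binary, afterwait_sleep=30,
    #                       retries=calculated_retries, sleep_seconds=30)
    # -> polls every 30 s, up to 120 times (~60 minutes), instead of 115 x 10 s.
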
http://git-wip-us.apache.org/repos/asf/ambari/blob/8873e992/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 65cd378..c554349 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -180,7 +180,7 @@ class NameNodeDefault(NameNode):
     namenode_upgrade.prepare_rolling_upgrade(hfds_binary)
 
   def wait_for_safemode_off(self, env):
-    wait_for_safemode_off(self.get_hdfs_binary(), 30, True)
+    wait_for_safemode_off(self.get_hdfs_binary(), afterwait_sleep=30, execute_kinit=True)
 
   def finalize_non_rolling_upgrade(self, env):
     hfds_binary = self.get_hdfs_binary()

http://git-wip-us.apache.org/repos/asf/ambari/blob/8873e992/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index e88dbdd..82fd950 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -54,6 +54,8 @@ architecture = get_architecture()
 stack_name = status_params.stack_name
 stack_root = Script.get_stack_root()
 upgrade_direction = default("/commandParams/upgrade_direction", None)
+rolling_restart = default("/commandParams/rolling_restart", False)
+rolling_restart_safemode_exit_timeout = default("/configurations/cluster-env/namenode_rolling_restart_safemode_exit_timeout", None)
 stack_version_unformatted = config['hostLevelParams']['stack_version']
 stack_version_formatted = format_stack_version(stack_version_unformatted)
 agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
@@ -552,4 +554,4 @@ if enable_ranger_hdfs:
 # need this to capture cluster name from where ranger hdfs plugin is enabled
 cluster_name = config['clusterName']
 
-# ranger hdfs plugin section end
\ No newline at end of file
+# ranger hdfs plugin section end

http://git-wip-us.apache.org/repos/asf/ambari/blob/8873e992/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
index a0ed658..181b3c8 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
@@ -47,7 +47,7 @@ from setup_ranger_hdfs import setup_ranger_hdfs, create_ranger_audit_hdfs_direct
 
 import namenode_upgrade
 
-def wait_for_safemode_off(hdfs_binary, afterwait_sleep=0, execute_kinit=False):
+def wait_for_safemode_off(hdfs_binary, afterwait_sleep=0, execute_kinit=False, retries=115, sleep_seconds=10):
   """
   During NonRolling (aka Express Upgrade), after starting NameNode, which is still in safemode, and then starting
   all of the DataNodes, we need for NameNode to receive all of the block reports and leave safemode.
@@ -55,8 +55,6 @@ def wait_for_safemode_off(hdfs_binary, afterwait_sleep=0, execute_kinit=False):
   """
   import params
 
-  retries = 115
-  sleep_seconds = 10
   sleep_minutes = int(sleep_seconds * retries / 60)
 
   Logger.info("Waiting up to {0} minutes for the NameNode to leave Safemode...".format(sleep_minutes))
@@ -216,7 +214,11 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
 
     # wait for Safemode to end
     if ensure_safemode_off:
-      wait_for_safemode_off(hdfs_binary)
+      if params.rolling_restart and params.rolling_restart_safemode_exit_timeout:
+        calculated_retries = int(params.rolling_restart_safemode_exit_timeout) / 30
+        wait_for_safemode_off(hdfs_binary, afterwait_sleep=30, retries=calculated_retries, sleep_seconds=30)
+      else:
+        wait_for_safemode_off(hdfs_binary)
 
     # Always run this on the "Active" NN unless Safemode has been ignored
     # in the case where safemode was ignored (like during an express upgrade), then

http://git-wip-us.apache.org/repos/asf/ambari/blob/8873e992/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
index a42ca79..4c4a7eb 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
@@ -180,7 +180,7 @@ class NameNodeDefault(NameNode):
     namenode_upgrade.prepare_rolling_upgrade(hfds_binary)
 
   def wait_for_safemode_off(self, env):
-    wait_for_safemode_off(self.get_hdfs_binary(), 30, True)
+    wait_for_safemode_off(self.get_hdfs_binary(), afterwait_sleep=30, execute_kinit=True)
 
   def finalize_non_rolling_upgrade(self, env):
     hfds_binary = self.get_hdfs_binary()

http://git-wip-us.apache.org/repos/asf/ambari/blob/8873e992/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
index 58bb65f..ad49d81 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
@@ -51,6 +51,8 @@ tmp_dir = Script.get_tmp_dir()
 stack_name = status_params.stack_name
 stack_root = Script.get_stack_root()
 upgrade_direction = default("/commandParams/upgrade_direction", None)
+rolling_restart = default("/commandParams/rolling_restart", False)
+rolling_restart_safemode_exit_timeout = default("/configurations/cluster-env/namenode_rolling_restart_safemode_exit_timeout", None)
 stack_version_unformatted = config['hostLevelParams']['stack_version']
 stack_version_formatted = format_stack_version(stack_version_unformatted)
 agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']

http://git-wip-us.apache.org/repos/asf/ambari/blob/8873e992/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index 733b10d..f7d5de5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -318,4 +318,16 @@ gpgcheck=0</value>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
   </property>
+  <property>
+    <name>namenode_rolling_restart_timeout</name>
+    <value>4200</value>
+    <description>Timeout for namenode rolling restart command.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_rolling_restart_safemode_exit_timeout</name>
+    <value>3600</value>
+    <description>Timeout for safemode exit, during namenode rolling restart</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

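Both new cluster-env properties are expressed in seconds: namenode_rolling_restart_timeout (4200) caps the whole rolling-restart command, while namenode_rolling_restart_safemode_exit_timeout (3600) bounds only the safemode wait, which leaves roughly ten minutes of headroom for the stop/start itself. A quick sanity check of those shipped defaults (plain arithmetic, not Ambari code):

    # Sanity check of the shipped defaults (all values in seconds).
    command_timeout = 4200                      # namenode_rolling_restart_timeout
    safemode_exit_timeout = 3600                # namenode_rolling_restart_safemode_exit_timeout
    retries, poll = safemode_exit_timeout // 30, 30
    assert retries * poll <= command_timeout    # 120 * 30 = 3600 <= 4200, ~600 s headroom
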
http://git-wip-us.apache.org/repos/asf/ambari/blob/8873e992/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
index a79e904..341079b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
@@ -307,4 +307,16 @@ gpgcheck=0</value>
     <description>Flag to turn on when external setup of External Ranger is done.</description>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>namenode_rolling_restart_timeout</name>
+    <value>4200</value>
+    <description>Timeout for namenode rolling restart command.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>namenode_rolling_restart_safemode_exit_timeout</name>
+    <value>3600</value>
+    <description>Timeout for safemode exit, during namenode rolling restart</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
 </configuration>