Posted to commits@ambari.apache.org by sw...@apache.org on 2016/12/08 23:29:59 UTC

[20/25] ambari git commit: Merge from branch-2.5

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
index 9547335..3b814a9 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
+++ b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
@@ -21,6 +21,23 @@
 -->
 <configuration supports_adding_forbidden="true">
   <property>
+    <name>storm.atlas.hook</name>
+    <value>false</value>
+    <display-name>Enable Atlas Hook</display-name>
+    <description>Enable Atlas Hook</description>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.rest.address</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
     <name>nimbus_seeds_supported</name>
     <value>true</value>
     <description/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
index e753e98..4859534 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
@@ -170,7 +170,7 @@ if stack_supports_storm_kerberos:
   else:
     storm_thrift_transport = config['configurations']['storm-site']['_storm.thrift.nonsecure.transport']
 
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 has_metric_collector = not len(ams_collector_hosts) == 0
 metric_collector_port = None
 if has_metric_collector:
@@ -214,10 +214,10 @@ jar_jvm_opts = ''
 ########################################################
 #region Atlas Hooks
 storm_atlas_application_properties = default('/configurations/storm-atlas-application.properties', {})
+enable_atlas_hook = default('/configurations/storm-env/storm.atlas.hook', False)
+atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
 
-if has_atlas_in_cluster():
-  atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
-
+if enable_atlas_hook:
   # Only append /etc/atlas/conf to classpath if on HDP 2.4.*
   if check_stack_feature(StackFeature.ATLAS_CONF_DIR_IN_PATH, stack_version_formatted):
     atlas_conf_dir = os.environ['METADATA_CONF'] if 'METADATA_CONF' in os.environ else '/etc/atlas/conf'

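Note: this hunk switches the Storm Atlas hook decision from has_atlas_in_cluster() to the new storm-env property, and flattens ams_collector_hosts into a comma-separated string. An illustrative stand-in for the default() lookup used here (the real helper in resource_management reads the command JSON implicitly; this standalone version takes it as an argument):

# Illustrative stand-in for resource_management's default(): walk a
# '/'-separated path into the command JSON, returning the fallback when
# any segment is absent.
def default(path, fallback, config):
    node = config
    for key in path.strip('/').split('/'):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

config = {'clusterHostInfo': {'metrics_collector_hosts': ['c6401.ambari.apache.org']}}

# After this change the collector hosts become one comma-separated string,
# so has_metric_collector tests the string's length rather than the list's.
ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", [], config))
has_metric_collector = not len(ams_collector_hosts) == 0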
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py
index bda4fe2..f02ced4 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py
@@ -99,7 +99,7 @@ def storm(name=None):
   )
 
   # Generate atlas-application.properties.xml file and symlink the hook jars
-  if has_atlas_in_cluster():
+  if params.enable_atlas_hook:
     atlas_hook_filepath = os.path.join(params.conf_dir, params.atlas_hook_filename)
     setup_atlas_hook(SERVICE.STORM, params.storm_atlas_application_properties, atlas_hook_filepath, params.storm_user, params.user_group)
     storm_extlib_dir = os.path.join(params.storm_component_home_dir, "extlib")

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
index 1d8963d..a36e8cc 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
@@ -126,34 +126,56 @@ class ServiceCheckDefault(ServiceCheck):
       if "application" in item:
         application_name = item
 
-    for rm_webapp_address in params.rm_webapp_addresses_list:
-      info_app_url = params.scheme + "://" + rm_webapp_address + "/ws/v1/cluster/apps/" + application_name
+    # Find out the active RM from RM list
+    # Raise an exception if the active rm cannot be determined
+    active_rm_webapp_address = self.get_active_rm_webapp_address()
+    Logger.info("Active Resource Manager web app address is : " + active_rm_webapp_address);
 
-      get_app_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
+    # Verify job state from active resource manager via rest api
+    info_app_url = params.scheme + "://" + active_rm_webapp_address + "/ws/v1/cluster/apps/" + application_name
+    get_app_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
 
-      return_code, stdout, _ = get_user_call_output(get_app_info_cmd,
-                                            user=params.smokeuser,
-                                            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-                                            )
+    return_code, stdout, _ = get_user_call_output(get_app_info_cmd,
+                                                  user=params.smokeuser,
+                                                  path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                                                  )
 
-      # Handle HDP<2.2.8.1 where RM doesn't do automatic redirection from standby to active
-      if stdout.startswith("This is standby RM. Redirecting to the current active RM:"):
-        Logger.info(format("Skipped checking of {rm_webapp_address} since returned '{stdout}'"))
-        continue
+    try:
+      json_response = json.loads(stdout)
+    except Exception as e:
+      raise Fail(format("Response from YARN API was not a valid JSON. Response: {stdout}"))
 
-      try:
-        json_response = json.loads(stdout)
-      except Exception as e:
-        raise Fail(format("Response from YARN API was not a valid JSON. Response: {stdout}"))
-      
-      if json_response is None or 'app' not in json_response or \
-              'state' not in json_response['app'] or 'finalStatus' not in json_response['app']:
-        raise Fail("Application " + app_url + " returns invalid data.")
-
-      if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
-        raise Fail("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
+    if json_response is None or 'app' not in json_response or \
+            'state' not in json_response['app'] or 'finalStatus' not in json_response['app']:
+      raise Fail("Application " + app_url + " returns invalid data.")
 
+    if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
+      raise Fail("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
 
+  def get_active_rm_webapp_address(self):
+    import params
+    active_rm_webapp_address = None
+    rm_webapp_addresses = params.rm_webapp_addresses_list
+    if rm_webapp_addresses is not None and len(rm_webapp_addresses) > 0:
+      for rm_webapp_address in rm_webapp_addresses:
+        rm_state_url = params.scheme + "://" + rm_webapp_address + "/ws/v1/cluster/info"
+        get_cluster_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + rm_state_url
+        try:
+          return_code, stdout, _ = get_user_call_output(get_cluster_info_cmd,
+                                                        user=params.smokeuser,
+                                                        path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                                                        )
+          json_response = json.loads(stdout)
+          if json_response is not None and 'clusterInfo' in json_response \
+            and json_response['clusterInfo']['haState'] == "ACTIVE":
+              active_rm_webapp_address = rm_webapp_address
+              break
+        except Exception as e:
+          Logger.warning(format("Cluster info is not available from calling {get_cluster_info_cmd}"))
+
+    if active_rm_webapp_address is None:
+      raise Fail('Resource Manager state is not available. Failed to determine the active Resource Manager web application address from {0}'.format(','.join(rm_webapp_addresses)));
+    return active_rm_webapp_address
 
 if __name__ == "__main__":
   ServiceCheck().execute()

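The new get_active_rm_webapp_address() helper replaces the old standby-redirect handling: it probes each ResourceManager's /ws/v1/cluster/info endpoint and keeps the first one reporting haState ACTIVE. A minimal standalone sketch of the same probe, assuming Python 2 (as in Ambari's agent scripts) and plain urllib2 in place of the curl-based get_user_call_output; hostnames are hypothetical:

import json
import urllib2  # Python 2; Ambari service scripts run on the system Python 2

def get_active_rm_webapp_address(rm_webapp_addresses, scheme="http"):
    # Probe each RM; the active one reports haState == "ACTIVE".
    for rm_webapp_address in rm_webapp_addresses:
        url = scheme + "://" + rm_webapp_address + "/ws/v1/cluster/info"
        try:
            response = json.load(urllib2.urlopen(url, timeout=5))
            if response.get('clusterInfo', {}).get('haState') == "ACTIVE":
                return rm_webapp_address
        except Exception:
            pass  # unreachable or standby RM: try the next address
    raise RuntimeError("No active ResourceManager among: " + ",".join(rm_webapp_addresses))

# Hypothetical two-node HA setup:
# get_active_rm_webapp_address(["rm1.example.com:8088", "rm2.example.com:8088"])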
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.6/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.6/metainfo.xml b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.6/metainfo.xml
index 525078e..cbb5ba2 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.6/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.6/metainfo.xml
@@ -35,7 +35,7 @@
           </packages>
         </osSpecific>
         <osSpecific>
-          <osFamily>ubuntu12</osFamily>
+          <osFamily>ubuntu12,ubuntu14,ubuntu16</osFamily>
           <packages>
             <package>
               <name>zookeeper-${stack_version}</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh b/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
index 34169c1..ee8d2d1 100755
--- a/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
+++ b/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
@@ -84,6 +84,10 @@ echo "[" | cat > "$JSON_INPUT"
 while read -r LINE
 do
   USR_NAME=$(echo "$LINE" | awk -F, '{print $1}')
+  echo "Processing user name: $USR_NAME"
+
+  # encoding the username
+  USR_NAME=$(printf "%q" "$USR_NAME")
 
   cat <<EOF >> "$JSON_INPUT"
     {
@@ -97,7 +101,10 @@ do
 EOF
 done <"$CSV_FILE"
 
+# deleting the last line
 sed -i '$ d' "$JSON_INPUT"
+
+# appending json closing elements to the end of the file
 echo $'}\n]' | cat >> "$JSON_INPUT"
 echo "Generating file $JSON_INPUT ... DONE."
 echo "Processing post user creation hook payload ... DONE."

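The printf "%q" call shell-escapes the user-supplied name before it is spliced into the JSON heredoc. A rough Python analogue of the intent, assuming the goal is simply to neutralize special characters in names before embedding them in the generated payload (this helper is illustrative, not part of the hook):

import json

def encode_username(usr_name):
    # Let the serializer quote and escape the value instead of
    # interpolating the raw string into a JSON template.
    return json.dumps({"username": usr_name})

# A name containing a quote no longer breaks the payload:
print(encode_username('malicious"user'))  # {"username": "malicious\"user"}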
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/scripts/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/stack_advisor.py b/ambari-server/src/main/resources/scripts/stack_advisor.py
index 5926c39..abfab87 100755
--- a/ambari-server/src/main/resources/scripts/stack_advisor.py
+++ b/ambari-server/src/main/resources/scripts/stack_advisor.py
@@ -70,13 +70,11 @@ def main(argv=None):
   if len(args) < 3:
     sys.stderr.write(USAGE)
     sys.exit(2)
-    pass
 
   action = args[0]
   if action not in ALL_ACTIONS:
     sys.stderr.write(USAGE)
     sys.exit(2)
-    pass
 
   hostsFile = args[1]
   servicesFile = args[2]
@@ -89,6 +87,7 @@ def main(argv=None):
   stackName = services["Versions"]["stack_name"]
   stackVersion = services["Versions"]["stack_version"]
   parentVersions = []
+
   if "stack_hierarchy" in services["Versions"]:
     parentVersions = services["Versions"]["stack_hierarchy"]["stack_versions"]
 
@@ -96,8 +95,9 @@ def main(argv=None):
 
   # Perform action
   actionDir = os.path.realpath(os.path.dirname(args[1]))
-  result = {}
-  result_file = "non_valid_result_file.json"
+
+  # filter
+  hosts = stackAdvisor.filterHostMounts(hosts, services)
 
   if action == RECOMMEND_COMPONENT_LAYOUT_ACTION:
     result = stackAdvisor.recommendComponentLayout(services, hosts)
@@ -111,12 +111,11 @@ def main(argv=None):
   elif action == RECOMMEND_CONFIGURATION_DEPENDENCIES:
     result = stackAdvisor.recommendConfigurationDependencies(services, hosts)
     result_file = os.path.join(actionDir, "configurations.json")
-  else: # action == VALIDATE_CONFIGURATIONS
+  else:  # action == VALIDATE_CONFIGURATIONS
     result = stackAdvisor.validateConfigurations(services, hosts)
     result_file = os.path.join(actionDir, "configurations-validation.json")
 
   dumpJson(result, result_file)
-  pass
 
 
 def instantiateStackAdvisor(stackName, stackVersion, parentVersions):

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index cc6c8a3..93680bf 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -280,4 +280,14 @@ gpgcheck=0</value>
     <description>YARN Memory widget should be hidden by default on the dashboard.</description>
     <on-ambari-upgrade add="true"/>
   </property>
+   <property>
+    <name>agent_mounts_ignore_list</name>
+    <value/>
+    <description>Comma separated list of the mounts which would be ignored by Ambari during property values suggestion by Stack Advisor</description>
+    <on-ambari-upgrade add="false"/>
+    <value-attributes>
+      <visible>true</visible>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
 </configuration>

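The agent_mounts_ignore_list property feeds the filterHostMounts() call added to scripts/stack_advisor.py above. A hedged sketch of the idea, assuming the filter drops mounts named in the list from each host's disk_info before mount-based recommendations run (the function body and exact field names here are illustrative):

def filter_host_mounts(hosts, services):
    # Comma-separated mount points to ignore, as configured in cluster-env.
    banned = services['configurations']['cluster-env']['properties'] \
        .get('agent_mounts_ignore_list', '')
    banned_mounts = [m.strip() for m in banned.split(',') if m.strip()]
    for host in hosts['items']:
        disk_info = host['Hosts']['disk_info']
        host['Hosts']['disk_info'] = [
            disk for disk in disk_info if disk.get('mountpoint') not in banned_mounts
        ]
    return hosts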
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index 320872e..f97789b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -105,13 +105,17 @@ def create_users_and_groups(user_and_groups):
 
   import params
 
-  parts = re.split('\s', user_and_groups)
+  parts = re.split('\s+', user_and_groups)
   if len(parts) == 1:
     parts.append("")
 
   users_list = parts[0].split(",") if parts[0] else []
   groups_list = parts[1].split(",") if parts[1] else []
 
+  # skip creating groups and users if * is provided as value.
+  users_list = filter(lambda x: x != '*' , users_list)
+  groups_list = filter(lambda x: x != '*' , groups_list)
+
   if users_list:
     User(users_list,
           fetch_nonlocal_groups = params.fetch_nonlocal_groups

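The '\s' to '\s+' change matters when the users and groups parts are separated by more than one whitespace character; with '\s', the second separator produced an empty element and pushed the groups into parts[2]:

import re

user_and_groups = "hdfs,yarn  hadoop"   # two spaces between users and groups
re.split('\s', user_and_groups)         # ['hdfs,yarn', '', 'hadoop'] -> groups land in parts[2]
re.split('\s+', user_and_groups)        # ['hdfs,yarn', 'hadoop']     -> groups in parts[1]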
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index 4603c8b..4a5ee25 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -90,7 +90,7 @@ jtnode_host = default("/clusterHostInfo/jtnode_host", [])
 namenode_host = default("/clusterHostInfo/namenode_host", [])
 zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 
 has_namenode = not len(namenode_host) == 0
 has_resourcemanager = not len(rm_host) == 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
index dd87b72..27a755c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
@@ -289,6 +289,11 @@
       "min_version": "2.5.0.0"
     },
     {
+      "name": "ranger_setup_db_on_start",
+      "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
+      "min_version": "2.6.0.0"
+    },
+    {
       "name": "storm_metrics_apache_classes",
       "description": "Metrics sink for Storm that uses Apache class names",
       "min_version": "2.5.0.0"

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 0c74c02..e47743e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -100,9 +100,23 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       "HBASE": self.recommendHbaseConfigurations,
       "STORM": self.recommendStormConfigurations,
       "AMBARI_METRICS": self.recommendAmsConfigurations,
-      "RANGER": self.recommendRangerConfigurations
+      "RANGER": self.recommendRangerConfigurations,
+      "ZOOKEEPER": self.recommendZookeeperConfigurations,
+      "OOZIE": self.recommendOozieConfigurations
     }
 
+  def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
+    oozie_mount_properties = [
+      ("oozie_data_dir", "OOZIE_SERVER", "/hadoop/oozie/data", "single"),
+    ]
+    self.updateMountProperties("oozie-env", oozie_mount_properties, configurations, services, hosts)
+
+  def recommendZookeeperConfigurations(self, configurations, clusterData, services, hosts):
+    zk_mount_properties = [
+      ("dataDir", "ZOOKEEPER_SERVER", "/hadoop/zookeeper", "single"),
+    ]
+    self.updateMountProperties("zoo.cfg", zk_mount_properties, configurations, services, hosts)
+
   def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
     putYarnProperty = self.putProperty(configurations, "yarn-site", services)
     putYarnPropertyAttribute = self.putPropertyAttribute(configurations, "yarn-site")
@@ -115,6 +129,15 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
     putYarnEnvProperty('min_user_id', self.get_system_min_uid())
 
+    yarn_mount_properties = [
+      ("yarn.nodemanager.local-dirs", "NODEMANAGER", "/hadoop/yarn/local", "multi"),
+      ("yarn.nodemanager.log-dirs", "NODEMANAGER", "/hadoop/yarn/log", "multi"),
+      ("yarn.timeline-service.leveldb-timeline-store.path", "APP_TIMELINE_SERVER", "/hadoop/yarn/timeline", "single"),
+      ("yarn.timeline-service.leveldb-state-store.path", "APP_TIMELINE_SERVER", "/hadoop/yarn/timeline", "single")
+    ]
+
+    self.updateMountProperties("yarn-site", yarn_mount_properties, configurations, services, hosts)
+
     sc_queue_name = self.recommendYarnQueue(services, "yarn-env", "service_check.queue.name")
     if sc_queue_name is not None:
       putYarnEnvProperty("service_check.queue.name", sc_queue_name)
@@ -145,6 +168,13 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
     putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
     putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
+
+    mapred_mounts = [
+      ("mapred.local.dir", ["TASKTRACKER", "NODEMANAGER"], "/hadoop/mapred", "multi")
+    ]
+
+    self.updateMountProperties("mapred-site", mapred_mounts, configurations, services, hosts)
+
     mr_queue = self.recommendYarnQueue(services, "mapred-site", "mapreduce.job.queuename")
     if mr_queue is not None:
       putMapredProperty("mapreduce.job.queuename", mr_queue)
@@ -341,12 +371,15 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       if len(namenodes.split(',')) > 1:
         putHDFSSitePropertyAttributes("dfs.namenode.rpc-address", "delete", "true")
 
-    #Initialize default 'dfs.datanode.data.dir' if needed
-    if (not hdfsSiteProperties) or ('dfs.datanode.data.dir' not in hdfsSiteProperties):
-      dataDirs = '/hadoop/hdfs/data'
-      putHDFSSiteProperty('dfs.datanode.data.dir', dataDirs)
-    else:
-      dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
+    hdfs_mount_properties = [
+      ("dfs.datanode.data.dir", "DATANODE", "/hadoop/hdfs/data", "multi"),
+      ("dfs.namenode.name.dir", "DATANODE", "/hadoop/hdfs/namenode", "multi"),
+      ("dfs.namenode.checkpoint.dir", "SECONDARY_NAMENODE", "/hadoop/hdfs/namesecondary", "single")
+    ]
+
+    self.updateMountProperties("hdfs-site", hdfs_mount_properties, configurations, services, hosts)
+
+    dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
 
     # dfs.datanode.du.reserved should be set to 10-15% of volume size
     # For each host selects maximum size of the volume. Then gets minimum for all hosts.

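Each entry handed to updateMountProperties() is a (property, component, default path, type) tuple; "multi" properties can spread one directory per usable mount, while "single" properties keep one. The helper itself lives in the base stack advisor; a simplified sketch of the behavior under those assumptions:

def update_mount_properties(site_name, mount_properties, configurations, mounts):
    # mounts: usable mount points on the relevant hosts, e.g. ["/", "/grid/0"];
    # this body is illustrative, not the shipped implementation.
    for prop_name, component, default_path, prop_type in mount_properties:
        if prop_type == "multi":
            # one directory per mount: /hadoop/hdfs/data, /grid/0/hadoop/hdfs/data, ...
            value = ",".join(m.rstrip("/") + default_path for m in mounts)
        else:  # "single": one directory on the first usable mount
            value = mounts[0].rstrip("/") + default_path
        configurations.setdefault(site_name, {}) \
            .setdefault("properties", {})[prop_name] = value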
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
index 9678dc1..17225d0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
@@ -24,12 +24,30 @@ class HDP21StackAdvisor(HDP206StackAdvisor):
     childRecommendConfDict = {
       "OOZIE": self.recommendOozieConfigurations,
       "HIVE": self.recommendHiveConfigurations,
-      "TEZ": self.recommendTezConfigurations
+      "TEZ": self.recommendTezConfigurations,
+      "STORM": self.recommendStormConfigurations,
+      "FALCON": self.recommendFalconConfigurations
     }
     parentRecommendConfDict.update(childRecommendConfDict)
     return parentRecommendConfDict
 
+  def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
+    storm_mounts = [
+      ("storm.local.dir", ["NODEMANAGER", "NIMBUS"], "/hadoop/storm", "single")
+    ]
+
+    self.updateMountProperties("storm-site", storm_mounts, configurations, services, hosts)
+
+  def recommendFalconConfigurations(self, configurations, clusterData, services, hosts):
+    falcon_mounts = [
+      ("*.falcon.graph.storage.directory", "FALCON_SERVER", "/hadoop/falcon/data/lineage/graphdb", "single")
+    ]
+
+    self.updateMountProperties("falcon-startup.properties", falcon_mounts, configurations, services, hosts)
+
   def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP21StackAdvisor, self).recommendOozieConfigurations(configurations, clusterData, services, hosts)
+
     oozieSiteProperties = getSiteProperties(services['configurations'], 'oozie-site')
     oozieEnvProperties = getSiteProperties(services['configurations'], 'oozie-env')
     putOozieProperty = self.putProperty(configurations, "oozie-site", services)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
index ab350dc..287db10 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
@@ -232,6 +232,10 @@ limitations under the License.
     <depends-on>
       <property>
         <type>hive-env</type>
+        <name>hive.atlas.hook</name>
+      </property>
+      <property>
+        <type>hive-env</type>
         <name>hive_timeline_logging_enabled</name>
       </property>
       <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 8187da8..3789a16 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -44,10 +44,17 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       "RANGER": self.recommendRangerConfigurations,
       "LOGSEARCH" : self.recommendLogsearchConfigurations,
       "SPARK": self.recommendSparkConfigurations,
+      "KAFKA": self.recommendKafkaConfigurations,
     }
     parentRecommendConfDict.update(childRecommendConfDict)
     return parentRecommendConfDict
 
+  def recommendKafkaConfigurations(self, configurations, clusterData, services, hosts):
+    kafka_mounts = [
+      ("log.dirs", "KAFKA_BROKER", "/kafka-logs", "multi")
+    ]
+
+    self.updateMountProperties("kafka-broker", kafka_mounts, configurations, services, hosts)
 
   def recommendSparkConfigurations(self, configurations, clusterData, services, hosts):
     """
@@ -1019,27 +1026,50 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
   def recommendLogsearchConfigurations(self, configurations, clusterData, services, hosts):
     putLogsearchProperty = self.putProperty(configurations, "logsearch-properties", services)
+    putLogsearchAttribute = self.putPropertyAttribute(configurations, "logsearch-properties")
+    putLogsearchEnvProperty = self.putProperty(configurations, "logsearch-env", services)
+    putLogsearchEnvAttribute = self.putPropertyAttribute(configurations, "logsearch-env")
+    putLogfeederEnvAttribute = self.putPropertyAttribute(configurations, "logfeeder-env")
+
     infraSolrHosts = self.getComponentHostNames(services, "AMBARI_INFRA", "INFRA_SOLR")
 
-    if infraSolrHosts is not None and len(infraSolrHosts) > 0 \
-      and "logsearch-properties" in services["configurations"]:
+    if infraSolrHosts is not None and len(infraSolrHosts) > 0 and "logsearch-properties" in services["configurations"]:
+      replicationReccomendFloat = math.log(len(infraSolrHosts), 5)
+      recommendedReplicationFactor = int(1 + math.floor(replicationReccomendFloat))
+      
       recommendedMinShards = len(infraSolrHosts)
       recommendedShards = 2 * len(infraSolrHosts)
       recommendedMaxShards = 3 * len(infraSolrHosts)
-      # recommend number of shard
-      putLogsearchAttribute = self.putPropertyAttribute(configurations, "logsearch-properties")
-      putLogsearchAttribute('logsearch.collection.service.logs.numshards', 'minimum', recommendedMinShards)
-      putLogsearchAttribute('logsearch.collection.service.logs.numshards', 'maximum', recommendedMaxShards)
-      putLogsearchProperty("logsearch.collection.service.logs.numshards", recommendedShards)
-
-      putLogsearchAttribute('logsearch.collection.audit.logs.numshards', 'minimum', recommendedMinShards)
-      putLogsearchAttribute('logsearch.collection.audit.logs.numshards', 'maximum', recommendedMaxShards)
-      putLogsearchProperty("logsearch.collection.audit.logs.numshards", recommendedShards)
-      # recommend replication factor
-      replicationReccomendFloat = math.log(len(infraSolrHosts), 5)
-      recommendedReplicationFactor = int(1 + math.floor(replicationReccomendFloat))
-      putLogsearchProperty("logsearch.collection.service.logs.replication.factor", recommendedReplicationFactor)
-      putLogsearchProperty("logsearch.collection.audit.logs.replication.factor", recommendedReplicationFactor)
+    else:
+      recommendedReplicationFactor = 2
+      
+      recommendedMinShards = 1
+      recommendedShards = 1
+      recommendedMaxShards = 100
+      
+      putLogsearchEnvProperty('logsearch_use_external_solr', 'true')
+      putLogsearchEnvAttribute('logsearch_use_external_solr', 'visible', 'false')
+
+    # recommend number of shard
+    putLogsearchAttribute('logsearch.collection.service.logs.numshards', 'minimum', recommendedMinShards)
+    putLogsearchAttribute('logsearch.collection.service.logs.numshards', 'maximum', recommendedMaxShards)
+    putLogsearchProperty("logsearch.collection.service.logs.numshards", recommendedShards)
+
+    putLogsearchAttribute('logsearch.collection.audit.logs.numshards', 'minimum', recommendedMinShards)
+    putLogsearchAttribute('logsearch.collection.audit.logs.numshards', 'maximum', recommendedMaxShards)
+    putLogsearchProperty("logsearch.collection.audit.logs.numshards", recommendedShards)
+    # recommend replication factor
+    putLogsearchProperty("logsearch.collection.service.logs.replication.factor", recommendedReplicationFactor)
+    putLogsearchProperty("logsearch.collection.audit.logs.replication.factor", recommendedReplicationFactor)
+    
+    kerberos_authentication_enabled = self.isSecurityEnabled(services)
+    if not kerberos_authentication_enabled:
+       putLogsearchEnvProperty('logsearch_external_solr_kerberos_enabled', 'false')
+       putLogsearchEnvAttribute('logsearch_external_solr_kerberos_enabled', 'visible', 'false')
+       putLogsearchEnvAttribute('logsearch_external_solr_kerberos_keytab', 'visible', 'false')
+       putLogsearchEnvAttribute('logsearch_external_solr_kerberos_principal', 'visible', 'false')
+       putLogfeederEnvAttribute('logfeeder_external_solr_kerberos_keytab', 'visible', 'false')
+       putLogfeederEnvAttribute('logfeeder_external_solr_kerberos_principal', 'visible', 'false')
 
   def validateTezConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     validationItems = [ {"config-name": 'tez.am.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.am.resource.memory.mb')},

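The replication-factor recommendation int(1 + floor(log5(n))) grows slowly with the Infra Solr host count, while the shard counts scale linearly; for example:

import math

def recommended_replication_factor(num_infra_solr_hosts):
    return int(1 + math.floor(math.log(num_infra_solr_hosts, 5)))

# 1-4 hosts -> 1, 5-24 hosts -> 2, 25+ hosts -> 3
assert recommended_replication_factor(1) == 1
assert recommended_replication_factor(5) == 2
assert recommended_replication_factor(25) == 3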
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml
index b71f4a9..0a0c34f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml
@@ -67,6 +67,10 @@
         <type>application-properties</type>
         <name>atlas.authentication.method</name>
       </property>
+      <property>
+        <type>storm-env</type>
+        <name>storm.atlas.hook</name>
+      </property>
     </depends-on>
     <on-ambari-upgrade add="false"/>
   </property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 9376fed..cc53ae5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -81,7 +81,8 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
       "RANGER": self.recommendRangerConfigurations,
       "RANGER_KMS": self.recommendRangerKMSConfigurations,
       "STORM": self.recommendStormConfigurations,
-      "SQOOP": self.recommendSqoopConfigurations
+      "SQOOP": self.recommendSqoopConfigurations,
+      "FALCON": self.recommendFalconConfigurations
     }
     parentRecommendConfDict.update(childRecommendConfDict)
     return parentRecommendConfDict
@@ -221,6 +222,7 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
     super(HDP23StackAdvisor, self).recommendHIVEConfigurations(configurations, clusterData, services, hosts)
     putHiveSiteProperty = self.putProperty(configurations, "hive-site", services)
     putHiveServerProperty = self.putProperty(configurations, "hiveserver2-site", services)
+    putHiveEnvProperty = self.putProperty(configurations, "hive-env", services)
     putHiveSitePropertyAttribute = self.putPropertyAttribute(configurations, "hive-site")
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     # hive_security_authorization == 'ranger'
@@ -260,7 +262,19 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
     hive_hooks = [x for x in hive_hooks if x != ""]
     is_atlas_present_in_cluster = "ATLAS" in servicesList
 
+    enable_atlas_hook = False
     if is_atlas_present_in_cluster:
+      putHiveEnvProperty("hive.atlas.hook", "true")
+    else:
+      putHiveEnvProperty("hive.atlas.hook", "false")
+
+    if ('hive-env' in services['configurations']) and ('hive.atlas.hook' in services['configurations']['hive-env']['properties']):
+      if 'hive-env' in configurations and 'hive.atlas.hook' in configurations['hive-env']['properties']:
+        enable_atlas_hook = configurations['hive-env']['properties']['hive.atlas.hook'] == "true"
+      elif 'hive-env' in services['configurations'] and 'hive.atlas.hook' in services['configurations']['hive-env']['properties']:
+        enable_atlas_hook = services['configurations']['hive-env']['properties']['hive.atlas.hook'] == "true"
+
+    if enable_atlas_hook:
       # Append atlas hook if not already present.
       is_atlas_hook_in_config = atlas_hook_class in hive_hooks
       if not is_atlas_hook_in_config:
@@ -756,7 +770,9 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
     knox_port = '8443'
     if 'KNOX' in servicesList:
       knox_hosts = self.getComponentHostNames(services, "KNOX", "KNOX_GATEWAY")
-      knox_host = knox_hosts[0]
+      if len(knox_hosts) > 0:
+        knox_hosts.sort()
+        knox_host = knox_hosts[0]
       if 'gateway-site' in services['configurations'] and 'gateway.port' in services['configurations']["gateway-site"]["properties"]:
         knox_port = services['configurations']["gateway-site"]["properties"]['gateway.port']
       putRangerAdminProperty('ranger.sso.providerurl', 'https://{0}:{1}/gateway/knoxsso/api/v1/websso'.format(knox_host, knox_port))
@@ -793,14 +809,28 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
 
   def recommendSqoopConfigurations(self, configurations, clusterData, services, hosts):
     putSqoopSiteProperty = self.putProperty(configurations, "sqoop-site", services)
+    putSqoopEnvProperty = self.putProperty(configurations, "sqoop-env", services)
 
+    enable_atlas_hook = False
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     if "ATLAS" in servicesList:
+      putSqoopEnvProperty("sqoop.atlas.hook", "true")
+    else:
+      putSqoopEnvProperty("sqoop.atlas.hook", "false")
+
+    if ('sqoop-env' in services['configurations']) and ('sqoop.atlas.hook' in services['configurations']['sqoop-env']['properties']):
+      if 'sqoop-env' in configurations and 'sqoop.atlas.hook' in configurations['sqoop-env']['properties']:
+        enable_atlas_hook = configurations['sqoop-env']['properties']['sqoop.atlas.hook'] == "true"
+      elif 'sqoop-env' in services['configurations'] and 'sqoop.atlas.hook' in services['configurations']['sqoop-env']['properties']:
+        enable_atlas_hook = services['configurations']['sqoop-env']['properties']['sqoop.atlas.hook'] == "true"
+
+    if enable_atlas_hook:
       putSqoopSiteProperty('sqoop.job.data.publish.class', 'org.apache.atlas.sqoop.hook.SqoopHook')
 
   def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP23StackAdvisor, self).recommendStormConfigurations(configurations, clusterData, services, hosts)
     putStormStartupProperty = self.putProperty(configurations, "storm-site", services)
+    putStormEnvProperty = self.putProperty(configurations, "storm-env", services)
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
 
     if "storm-site" in services["configurations"]:
@@ -816,11 +846,23 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
       atlas_is_present = "ATLAS" in servicesList
       atlas_hook_class = "org.apache.atlas.storm.hook.StormAtlasHook"
       atlas_hook_is_set = atlas_hook_class in notifier_plugin_value
+      enable_atlas_hook = False
+
+      if atlas_is_present:
+        putStormEnvProperty("storm.atlas.hook", "true")
+      else:
+        putStormEnvProperty("storm.atlas.hook", "false")
 
-      if atlas_is_present and not atlas_hook_is_set:
+      if ('storm-env' in services['configurations']) and ('storm.atlas.hook' in services['configurations']['storm-env']['properties']):
+        if 'storm-env' in configurations and 'storm.atlas.hook' in configurations['storm-env']['properties']:
+          enable_atlas_hook = configurations['storm-env']['properties']['storm.atlas.hook'] == "true"
+        elif 'storm-env' in services['configurations'] and 'storm.atlas.hook' in services['configurations']['storm-env']['properties']:
+          enable_atlas_hook = services['configurations']['storm-env']['properties']['storm.atlas.hook'] == "true"
+
+      if enable_atlas_hook and not atlas_hook_is_set:
         notifier_plugin_value = atlas_hook_class if notifier_plugin_value == " " else ",".join([notifier_plugin_value, atlas_hook_class])
 
-      if not atlas_is_present and atlas_hook_is_set:
+      if not enable_atlas_hook and atlas_hook_is_set:
         application_classes = [item for item in notifier_plugin_value.split(",") if item != atlas_hook_class and item != " "]
         notifier_plugin_value = ",".join(application_classes) if application_classes else " "
 
@@ -830,6 +872,17 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
         putStormStartupPropertyAttribute = self.putPropertyAttribute(configurations, "storm-site")
         putStormStartupPropertyAttribute(notifier_plugin_property, 'delete', 'true')
 
+  def recommendFalconConfigurations(self, configurations, clusterData, services, hosts):
+
+    putFalconEnvProperty = self.putProperty(configurations, "falcon-env", services)
+    enable_atlas_hook = False
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    if "ATLAS" in servicesList:
+      putFalconEnvProperty("falcon.atlas.hook", "true")
+    else:
+      putFalconEnvProperty("falcon.atlas.hook", "false")
+
   def getServiceConfigurationValidators(self):
     parentValidators = super(HDP23StackAdvisor, self).getServiceConfigurationValidators()
     childValidators = {

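The same resolution pattern now repeats for hive-env, sqoop-env, and storm-env: a value just recommended into configurations takes precedence over the cluster's current value in services['configurations']. A condensed sketch of that effective precedence as a helper (the name and factoring are hypothetical; the shipped code inlines this per service):

def resolve_atlas_hook_flag(env_type, prop_name, configurations, services):
    # Prefer the value recommended in this pass, then the current config.
    recommended = configurations.get(env_type, {}).get('properties', {})
    if prop_name in recommended:
        return recommended[prop_name] == "true"
    current = services['configurations'].get(env_type, {}).get('properties', {})
    return current.get(prop_name) == "true"

# e.g. resolve_atlas_hook_flag('hive-env', 'hive.atlas.hook', configurations, services)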
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
index d824309..d274135 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
@@ -36,8 +36,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -416,6 +417,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -633,16 +648,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

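The new <condition> element moves the decision out of the operator's hands: with comparison="not-equals", the manual stage runs only when yarn.resourcemanager.work-preserving-recovery.enabled is anything other than "true". A minimal sketch of that predicate, assuming those semantics for config-type conditions:

def config_condition_applies(config, cond_type, prop, value, comparison):
    # Returns True when the gated stage should run.
    actual = config.get(cond_type, {}).get(prop)
    if comparison == "equals":
        return actual == value
    if comparison == "not-equals":
        return actual != value
    raise ValueError("unsupported comparison: " + comparison)

# With work-preserving recovery enabled, the manual YARN-queue steps are skipped:
yarn_site = {"yarn-site": {"yarn.resourcemanager.work-preserving-recovery.enabled": "true"}}
assert not config_condition_applies(
    yarn_site, "yarn-site",
    "yarn.resourcemanager.work-preserving-recovery.enabled", "true", "not-equals")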
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
index 882e78b..8c9414a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
@@ -36,8 +36,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -467,6 +468,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -684,16 +699,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
index 66c0a70..b7182d1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
@@ -47,8 +47,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -616,6 +617,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -833,16 +848,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
index 019c76e..155aaf9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
@@ -48,8 +48,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -612,6 +613,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -829,16 +844,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
index 4d1b5f1..bb70f6a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
@@ -435,7 +435,7 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_UPGRADE" title="Stop Storm Services">
       <direction>UPGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -457,7 +457,7 @@
       </service>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_DOWNGRADE" title="Stop Storm Services">
       <direction>DOWNGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
index 02cef57..c8baea1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
@@ -436,7 +436,7 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_UPGRADE" title="Stop Storm Services">
       <direction>UPGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -458,7 +458,7 @@
       </service>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_DOWNGRADE" title="Stop Storm Services">
       <direction>DOWNGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.4/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..24e0193
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,176 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>hadoop-env template</display-name>
+    <description>This is the jinja template for hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hadoop Configuration Directory
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+
+{% if java_version &lt; 8 %}
+SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
+export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+
+{% else %}
+SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
+export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+{% endif %}
+
+HADOOP_NFS3_OPTS="-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Add database libraries
+JAVA_JDBC_LIBS=""
+if [ -d "/usr/share/java" ]; then
+  for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
+  do
+    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+  done
+fi
+
+# Add libraries to the hadoop classpath - some may not need a colon as they already include it
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+# Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}
+
+export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
+
+
+# Fix a temporary bug where the ulimit from conf files is not picked up without a full relogin.
+# Makes sense to fix only when running the DataNode as root.
+if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
+  {% if is_datanode_max_locked_memory_set %}
+  ulimit -l {{datanode_max_locked_memory}}
+  {% endif %}
+  ulimit -n {{hdfs_user_nofile_limit}}
+fi
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>nfsgateway_heapsize</name>
+    <display-name>NFSGateway maximum Java heap size</display-name>
+    <value>1024</value>
+    <description>Maximum Java heap size for NFSGateway (Java option -Xmx)</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
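
The <value> above is a Jinja2 template: {{...}} placeholders are filled from Ambari's derived params, and the {% if java_version &lt; 8 %} branch keeps the -XX:PermSize/-XX:MaxPermSize flags only on pre-8 JVMs, since the permanent generation those flags size no longer exists in Java 8. A minimal standalone sketch of the rendering step; the parameter values below are illustrative, not from a real cluster:

from jinja2 import Template

# Illustrative fragment of the template above; values are made up.
fragment = (
    "export JAVA_HOME={{java_home}}\n"
    "{% if java_version < 8 %}"
    "export HADOOP_CLIENT_OPTS=\"-Xmx{{hadoop_heapsize}}m -XX:MaxPermSize=512m\"\n"
    "{% else %}"
    "export HADOOP_CLIENT_OPTS=\"-Xmx{{hadoop_heapsize}}m\"\n"
    "{% endif %}"
)
print(Template(fragment).render(java_home="/usr/jdk64/jdk1.8.0_77",
                                java_version=8,
                                hadoop_heapsize=1024))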

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
index b7d7983..b9a7e1e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
@@ -35,8 +35,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -409,6 +410,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -626,16 +641,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
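
Two related changes in the diff above: the advisory text inside the manual task's message becomes a machine-checked <condition>, so the stop/start stages run only when yarn.resourcemanager.work-preserving-recovery.enabled is not "true", and the start stage moves from the former end-of-upgrade MANUAL_STEPS group into a START_YARN_QUEUES group scheduled right after the service restarts. A hedged Python sketch of the condition's apparent semantics (the helper is illustrative, not Ambari's implementation):

def stage_applies(configurations, config_type, prop, value, comparison):
    # Mirrors the diff's <condition xsi:type="config" .../> attributes.
    actual = configurations.get(config_type, {}).get(prop)
    if comparison == "equals":
        return actual == value
    if comparison == "not-equals":
        return actual != value
    raise ValueError("unsupported comparison: " + comparison)

# Work-preserving recovery enabled: manual queue stop/start is skipped.
configs = {"yarn-site": {
    "yarn.resourcemanager.work-preserving-recovery.enabled": "true"}}
print(stage_applies(
    configs, "yarn-site",
    "yarn.resourcemanager.work-preserving-recovery.enabled",
    "true", "not-equals"))  # False -> stage does not run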

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
index 5183580..2b768ea 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
@@ -43,8 +43,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -580,6 +581,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -797,16 +812,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
index 2a1ecf7..92ce832 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
@@ -48,8 +48,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -567,6 +568,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -784,16 +799,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
index 201150d..39b7a4d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
@@ -420,7 +420,7 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_UPGRADE" title="Stop Storm Services">
       <direction>UPGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -442,7 +442,7 @@
       </service>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_DOWNGRADE" title="Stop Storm Services">
       <direction>DOWNGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
index cb16953..2fd7a7a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
@@ -425,7 +425,7 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_UPGRADE" title="Stop Storm Services">
       <direction>UPGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -447,7 +447,7 @@
       </service>
     </group>
 
-    <group xsi:type="stop" name="STOP_STORM" title="Stop Storm Services">
+    <group xsi:type="stop" name="STOP_STORM_DOWNGRADE" title="Stop Storm Services">
       <direction>DOWNGRADE</direction>
       <service-check>false</service-check>
       <skippable>true</skippable>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index 87ede63..9ddb667 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -44,5 +44,17 @@
         </changes>
       </component>
     </service>
+
+    <service name="SPARK">
+      <component name="LIVY_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_5_0_0_rename_spark_livy_configs">
+            <type>livy-conf</type>
+            <transfer operation="move" from-key="livy.server.kerberos.keytab" to-key="livy.server.launch.kerberos.keytab" />
+            <transfer operation="move" from-key="livy.server.kerberos.principal" to-key="livy.server.launch.kerberos.principal" />
+          </definition>
+        </changes>
+      </component>
+    </service>
   </services>
 </upgrade-config-changes>
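
The two transfer elements rename Livy's Kerberos keys during the HDP 2.5 config upgrade: livy.server.kerberos.{keytab,principal} become livy.server.launch.kerberos.{keytab,principal}, with their values carried over. A minimal sketch of what operation="move" amounts to on a config dict; the helper and the sample keytab/principal values are illustrative, not Ambari's actual upgrade machinery:

def move_key(config, from_key, to_key):
    # Rename a key, preserving its value; no-op if the key is absent.
    if from_key in config:
        config[to_key] = config.pop(from_key)

livy_conf = {
    "livy.server.kerberos.keytab": "/etc/security/keytabs/livy.service.keytab",
    "livy.server.kerberos.principal": "livy/_HOST@EXAMPLE.COM",
}
move_key(livy_conf, "livy.server.kerberos.keytab",
         "livy.server.launch.kerberos.keytab")
move_key(livy_conf, "livy.server.kerberos.principal",
         "livy.server.launch.kerberos.principal")
print(sorted(livy_conf))
# ['livy.server.launch.kerberos.keytab', 'livy.server.launch.kerberos.principal']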

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
index 414ce15..6bca487 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
@@ -36,8 +36,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -441,6 +442,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -692,16 +707,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index fc4c8c5..66f872d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -36,8 +36,9 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
 
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
         <task xsi:type="manual">
-          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+          <message>Before continuing, please stop all YARN queues.</message>
         </task>
       </execute-stage>
 
@@ -59,6 +60,10 @@
           <function>prepare</function>
         </task>
       </execute-stage>
+
+      <execute-stage service="SPARK" component="LIVY_SERVER" title="Apply config changes for Livy Server">
+        <task xsi:type="configure" id="hdp_2_5_0_0_rename_spark_livy_configs"/>
+      </execute-stage>
     </group>
 
     <group xsi:type="stop" name="STOP_HIGH_LEVEL_SERVICE_COMPONENTS" title="Stop Components for High-Level Services">
@@ -436,6 +441,20 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="START_YARN_QUEUES" title="Start YARN Queues">
+      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
+        <condition xsi:type="config" type="yarn-site" property="yarn.resourcemanager.work-preserving-recovery.enabled" value="true" comparison="not-equals"/>
+        <task xsi:type="manual">
+          <message>Before continuing, please start all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="HBASE" title="HBASE">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -687,16 +706,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="cluster" name="MANUAL_STEPS" title="Finishing Upgrade">
-      <direction>UPGRADE</direction>
-
-      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Start YARN Queues">
-        <task xsi:type="manual">
-          <message>Please start previously stopped YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
-        </task>
-      </execute-stage>
-    </group>
-
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index eb4309a..1f7c1a8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -761,6 +761,9 @@
         </upgrade>
       </component>
       <component name="LIVY_SERVER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hdp_2_5_0_0_rename_spark_livy_configs" />
+        </pre-upgrade>
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/ACCUMULO/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ACCUMULO/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ACCUMULO/metainfo.xml
index ec81f3e..3f619af 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ACCUMULO/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ACCUMULO/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>ACCUMULO</name>
-      <version>1.7.0.2.5</version>
+      <version>1.7.0.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/FALCON/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/FALCON/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/FALCON/metainfo.xml
index 0b54385..b449388 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/FALCON/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/FALCON/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>FALCON</name>
-      <version>0.10.0.2.5</version>
+      <version>0.10.0.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/FLUME/metainfo.xml
index 33ceb43..0d7cd1f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/FLUME/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/FLUME/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>FLUME</name>
-      <version>1.5.2.2.5</version>
+      <version>1.5.2.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/metainfo.xml
index 0feaa5e..9bc3ee2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>HBASE</name>
-      <version>1.1.2.2.5</version>
+      <version>1.1.2.2.6</version>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/metainfo.xml
index a3e4a64..1fc7f51 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/metainfo.xml
@@ -20,7 +20,7 @@
   <services>
     <service>
       <name>HDFS</name>
-      <version>2.7.1.2.5</version>
+      <version>2.7.3.2.6</version>
     </service>
   </services>
 </metainfo>