Posted to commits@ambari.apache.org by nc...@apache.org on 2017/04/26 20:00:16 UTC

[2/2] ambari git commit: AMBARI-20846. Use exclude list of mount device types on docker containers (dgrinenko via ncole)

AMBARI-20846. Use exclude list of mount device types on docker containers (dgrinenko via ncole)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1ac59c3f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1ac59c3f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1ac59c3f

Branch: refs/heads/branch-2.4
Commit: 1ac59c3ff74e97c8f240969e19c317af49c75cce
Parents: 40747a9
Author: Nate Cole <nc...@hortonworks.com>
Authored: Wed Apr 26 16:00:02 2017 -0400
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Wed Apr 26 16:00:02 2017 -0400

----------------------------------------------------------------------
 ambari-agent/conf/unix/ambari-agent.ini         |   1 +
 .../src/main/python/ambari_agent/Facter.py      |  82 ++--
 .../src/main/python/ambari_agent/Hardware.py    | 233 ++++++----
 .../test/python/ambari_agent/TestController.py  |   4 +-
 .../test/python/ambari_agent/TestHardware.py    | 237 +++++++---
 .../test/python/ambari_agent/TestHeartbeat.py   |   6 +-
 .../python/ambari_agent/TestRegistration.py     |   2 +-
 .../src/main/resources/scripts/stack_advisor.py |  11 +-
 .../HDP/2.0.6/configuration/cluster-env.xml     |  10 +
 .../stacks/HDP/2.0.6/services/stack_advisor.py  |  47 +-
 .../stacks/HDP/2.1/services/stack_advisor.py    |  20 +-
 .../stacks/HDP/2.2/services/stack_advisor.py    |   7 +
 .../src/main/resources/stacks/stack_advisor.py  | 209 ++++++++-
 .../stacks/2.0.6/common/test_stack_advisor.py   |  16 +-
 .../stacks/2.1/common/test_stack_advisor.py     |   2 +
 .../stacks/2.2/common/test_stack_advisor.py     |  46 +-
 .../test/python/stacks/test_stack_adviser.py    | 239 ++++++++++
 .../upgrades/upgrade_multi_server_tasks.xml     |  88 ++++
 ambari-web/app/mixins.js                        |   1 -
 .../app/utils/configs/config_initializer.js     |  28 +-
 .../mount_points_based_initializer_mixin.js     | 340 --------------
 ambari-web/test/utils/ajax/ajax_test.js         |   9 +-
 .../utils/configs/config_initializer_test.js    | 457 -------------------
 23 files changed, 1044 insertions(+), 1051 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-agent/conf/unix/ambari-agent.ini
----------------------------------------------------------------------
diff --git a/ambari-agent/conf/unix/ambari-agent.ini b/ambari-agent/conf/unix/ambari-agent.ini
index 914e09a..f4ce402 100644
--- a/ambari-agent/conf/unix/ambari-agent.ini
+++ b/ambari-agent/conf/unix/ambari-agent.ini
@@ -36,6 +36,7 @@ alert_kinit_timeout=14400000
 system_resource_overrides=/etc/resource_overrides
 ; memory_threshold_soft_mb=400
 ; memory_threshold_hard_mb=1000
+; ignore_mount_points=/mnt/custom1,/mnt/custom2
 
 [security]
 keysdir=/var/lib/ambari-agent/keys
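
The new ignore_mount_points setting takes a comma-separated list of mount points the agent should skip when reporting disks. Below is a minimal sketch of the parsing, mirroring what Hardware.osdisks() does further down in this patch; the helper name and the empty-item guard are illustrative additions, not part of the commit.

def blacklisted_mount_points(ignore_mount_value):
  # "/mnt/custom1, /mnt/custom2" -> ["/mnt/custom1", "/mnt/custom2"]
  return [item.strip() for item in ignore_mount_value.split(",") if item.strip()]

print blacklisted_mount_points("/mnt/custom1, /mnt/custom2")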

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-agent/src/main/python/ambari_agent/Facter.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/Facter.py b/ambari-agent/src/main/python/ambari_agent/Facter.py
index 7520f4c..a1f815d 100644
--- a/ambari-agent/src/main/python/ambari_agent/Facter.py
+++ b/ambari-agent/src/main/python/ambari_agent/Facter.py
@@ -196,41 +196,57 @@ class Facter(object):
         log.info("'system_resource_dir' is not set - it won't be used for gathering system resources.")
     return systemResources
 
+  def getFqdn(self):
+    raise NotImplementedError()
 
-  def facterInfo(self):
-    facterInfo = {}
-    facterInfo['id'] = self.getId()
-    facterInfo['kernel'] = self.getKernel()
-    facterInfo['domain'] = self.getDomain()
-    facterInfo['fqdn'] = self.getFqdn()
-    facterInfo['hostname'] = self.getHostname()
-    facterInfo['macaddress'] = self.getMacAddress()
-    facterInfo['architecture'] = self.getArchitecture()
-    facterInfo['operatingsystem'] = self.getOperatingSystem()
-    facterInfo['operatingsystemrelease'] = self.getOperatingSystemRelease()
-    facterInfo['physicalprocessorcount'] = self.getProcessorcount()
-    facterInfo['processorcount'] = self.getProcessorcount()
-    facterInfo['timezone'] = self.getTimeZone()
-    facterInfo['hardwareisa'] = self.getArchitecture()
-    facterInfo['hardwaremodel'] = self.getArchitecture()
-    facterInfo['kernelrelease'] = self.getKernelRelease()
-    facterInfo['kernelversion'] = self.getKernelVersion()
-    facterInfo['osfamily'] = self.getOsFamily()
-    facterInfo['kernelmajversion'] = self.getKernelMajVersion()
-
-    facterInfo['ipaddress'] = self.getIpAddress()
-    facterInfo['netmask'] = self.getNetmask()
-    facterInfo['interfaces'] = self.getInterfaces()
-
-    facterInfo['uptime_seconds'] = str(self.getUptimeSeconds())
-    facterInfo['uptime_hours'] = str(self.getUptimeHours())
-    facterInfo['uptime_days'] = str(self.getUptimeDays())
-
-    facterInfo['memorysize'] = self.getMemorySize()
-    facterInfo['memoryfree'] = self.getMemoryFree()
-    facterInfo['memorytotal'] = self.getMemoryTotal()
+  def getNetmask(self):
+    raise NotImplementedError()
 
-    return facterInfo
+  def getInterfaces(self):
+    raise NotImplementedError()
+
+  def getUptimeSeconds(self):
+    raise NotImplementedError()
+
+  def getMemorySize(self):
+    raise NotImplementedError()
+
+  def getMemoryFree(self):
+    raise NotImplementedError()
+
+  def getMemoryTotal(self):
+    raise NotImplementedError()
+
+  def facterInfo(self):
+    return {
+      'id': self.getId(),
+      'kernel': self.getKernel(),
+      'domain': self.getDomain(),
+      'fqdn': self.getFqdn(),
+      'hostname': self.getHostname(),
+      'macaddress': self.getMacAddress(),
+      'architecture': self.getArchitecture(),
+      'operatingsystem': self.getOperatingSystem(),
+      'operatingsystemrelease': self.getOperatingSystemRelease(),
+      'physicalprocessorcount': self.getProcessorcount(),
+      'processorcount': self.getProcessorcount(),
+      'timezone': self.getTimeZone(),
+      'hardwareisa': self.getArchitecture(),
+      'hardwaremodel': self.getArchitecture(),
+      'kernelrelease': self.getKernelRelease(),
+      'kernelversion': self.getKernelVersion(),
+      'osfamily': self.getOsFamily(),
+      'kernelmajversion': self.getKernelMajVersion(),
+      'ipaddress': self.getIpAddress(),
+      'netmask': self.getNetmask(),
+      'interfaces': self.getInterfaces(),
+      'uptime_seconds': str(self.getUptimeSeconds()),
+      'uptime_hours': str(self.getUptimeHours()),
+      'uptime_days': str(self.getUptimeDays()),
+      'memorysize': self.getMemorySize(),
+      'memoryfree': self.getMemoryFree(),
+      'memorytotal': self.getMemoryTotal()
+    }
 
   #Convert kB to GB
   @staticmethod
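
The refactored base class now declares the platform-specific getters (getFqdn, getNetmask, getInterfaces, getUptimeSeconds and the memory getters) as NotImplementedError stubs and assembles facterInfo() as a single dict. A hypothetical subclass only needs to override those stubs; the sketch below assumes the remaining getters (getId, getKernel, and so on) are already provided by the Facter base class, and the class name and hard-coded values are illustrative only.

from ambari_agent.Facter import Facter

class FacterStub(Facter):
  # illustrative overrides for the stubs declared above
  def getFqdn(self):
    return "ambari.apache.org"

  def getNetmask(self):
    return "255.255.255.0"

  def getInterfaces(self):
    return "eth0,lo"

  def getUptimeSeconds(self):
    return 3600

  def getMemorySize(self):
    return 2048

  def getMemoryFree(self):
    return 1024

  def getMemoryTotal(self):
    return 2048

print FacterStub().facterInfo()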

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-agent/src/main/python/ambari_agent/Hardware.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/Hardware.py b/ambari-agent/src/main/python/ambari_agent/Hardware.py
index 17acdf2..37411b2 100644
--- a/ambari-agent/src/main/python/ambari_agent/Hardware.py
+++ b/ambari-agent/src/main/python/ambari_agent/Hardware.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-'''
+"""
 Licensed to the Apache Software Foundation (ASF) under one
 or more contributor license agreements.  See the NOTICE file
 distributed with this work for additional information
@@ -16,123 +16,188 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-'''
+"""
 
 import os.path
 import logging
 import subprocess
+from resource_management.core import shell
 from resource_management.core.shell import call
-from resource_management.core.exceptions import ExecuteTimeoutException
-from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.core.exceptions import ExecuteTimeoutException, Fail
 from ambari_commons.shell import shellRunner
 from Facter import Facter
 from ambari_commons.os_check import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from AmbariConfig import AmbariConfig
+from resource_management.core.sudo import path_isfile
+
 logger = logging.getLogger()
 
+
 class Hardware:
   SSH_KEY_PATTERN = 'ssh.*key'
   WINDOWS_GET_DRIVES_CMD = "foreach ($drive in [System.IO.DriveInfo]::getdrives()){$available = $drive.TotalFreeSpace;$used = $drive.TotalSize-$drive.TotalFreeSpace;$percent = ($used*100)/$drive.TotalSize;$size = $drive.TotalSize;$type = $drive.DriveFormat;$mountpoint = $drive.RootDirectory.FullName;echo \"$available $used $percent% $size $type $mountpoint\"}"
   CHECK_REMOTE_MOUNTS_KEY = 'agent.check.remote.mounts'
   CHECK_REMOTE_MOUNTS_TIMEOUT_KEY = 'agent.check.mounts.timeout'
   CHECK_REMOTE_MOUNTS_TIMEOUT_DEFAULT = '10'
+  IGNORE_ROOT_MOUNTS = ["proc", "dev", "sys"]
+  IGNORE_DEVICES = ["proc", "tmpfs", "cgroup", "mqueue", "shm"]
+  LINUX_PATH_SEP = "/"
 
   def __init__(self):
-    self.hardware = {}
-    self.hardware['mounts'] = Hardware.osdisks()
+    logger.info("Initializing host system information.")
+    self.hardware = {
+      'mounts': Hardware.osdisks()
+    }
     self.hardware.update(Facter().facterInfo())
-    pass
+    logger.info("Host system information: %s", self.hardware)
 
-  @staticmethod
-  def extractMountInfo(outputLine):
-    if outputLine == None or len(outputLine) == 0:
-      return None
+  @classmethod
+  def _parse_df_line(cls, line):
+    """
+      Parse a single line of 'df' command output into a dict.
+      Expected string format:
+       device fs_type disk_size used_size available_size capacity_used_percents mount_point
+    :type line str
+    """
 
-      """ this ignores any spaces in the filesystemname and mounts """
-    split = outputLine.split()
-    if (len(split)) == 7:
-      device, type, size, used, available, percent, mountpoint = split
-      mountinfo = {
-        'size': size,
-        'used': used,
-        'available': available,
-        'percent': percent,
-        'mountpoint': mountpoint,
-        'type': type,
-        'device': device}
-      return mountinfo
-    else:
+    line_split = line.split()
+    if len(line_split) != 7:
       return None
 
-  @staticmethod
-  @OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
-  def osdisks(config = None):
-    """ Run df to find out the disks on the host. Only works on linux
-    platforms. Note that this parser ignores any filesystems with spaces
-    and any mounts with spaces. """
-    mounts = []
-    command = []
-    timeout = Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_DEFAULT
-    if config and \
-        config.has_option(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY) and \
-        config.get(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY) != "0":
-        timeout = config.get(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY)
-    command.append("timeout")
-    command.append(timeout)
-    command.append("df")
-    command.append("-kPT")
-    if config and \
-        config.has_option(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY) and \
+    titles = ["device", "type", "size", "used", "available", "percent", "mountpoint"]
+    return dict(zip(titles, line_split))
+
+  @classmethod
+  def _get_mount_check_timeout(cls, config=None):
+    """Return timeout for df call command"""
+    if config and config.has_option(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY) \
+      and config.get(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY) != "0":
+
+      return config.get(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY)
+
+    return Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_DEFAULT
+
+  @classmethod
+  def _check_remote_mounts(cls, config=None):
+    """Check whether remote mounts are allowed to be processed"""
+    if config and config.has_option(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY) and \
         config.get(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY).lower() == "false":
-      #limit listing to local file systems
+
+      return False
+
+    return True
+
+  @classmethod
+  def _is_mount_blacklisted(cls, blacklist, mount_point):
+    """
+    Check whether a particular mount point is blacklisted.
+    :return True if mount_point, or any parent path of it, is in the blacklist; otherwise False
+     Example:
+       Mounts: /, /mnt/my_mount, /mnt/my_mount/sub_mount
+       Blacklist: /mnt/my_mount
+       Result: /
+    :type blacklist list
+    :type mount_point str
+    :rtype bool
+    """
+
+    if not blacklist or not mount_point:
+      return False
+
+    mount_point_elements = mount_point.split(cls.LINUX_PATH_SEP)
+
+    for el in blacklist:
+      el_list = el.split(cls.LINUX_PATH_SEP)
+      # element-wise comparison of path elements against the blacklist entry
+      if el_list == mount_point_elements[:len(el_list)]:
+        return True
+
+    return False
+
+  @classmethod
+  def osdisks(cls, config=None):
+    """ Run df to find out the disks on the host. Only works on linux
+      platforms. Note that this parser ignores any filesystems with spaces
+      and any mounts with spaces. """
+    timeout = cls._get_mount_check_timeout(config)
+    command = ["timeout", timeout, "df", "-kPT"]
+    blacklisted_mount_points = []
+
+    if config:
+      ignore_mount_value = config.get("agent", "ignore_mount_points", default="")
+      blacklisted_mount_points = [item.strip() for item in ignore_mount_value.split(",")]
+
+    if not cls._check_remote_mounts(config):
       command.append("-l")
 
-    df = subprocess.Popen(command, stdout=subprocess.PIPE)
-    dfdata = df.communicate()[0]
-    lines = dfdata.splitlines()
-    for l in lines:
-      mountinfo = Hardware.extractMountInfo(l)
-      if mountinfo != None and Hardware._chk_mount(mountinfo['mountpoint']):
-        mounts.append(mountinfo)
-      pass
-    pass
-    return mounts
-
-  @staticmethod
-  def _chk_mount(mountpoint):
     try:
-      return call(['test', '-w', mountpoint], sudo=True, timeout=int(Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_DEFAULT)/2)[0] == 0
-    except ExecuteTimeoutException:
-      logger.exception("Exception happened while checking mount {0}".format(mountpoint))
-      return False
-    
-  @staticmethod
-  @OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
-  def osdisks(config = None):
-    mounts = []
-    runner = shellRunner()
-    command_result = runner.runPowershell(script_block=Hardware.WINDOWS_GET_DRIVES_CMD)
-    if command_result.exitCode != 0:
-      return mounts
-    else:
-      for drive in [line for line in command_result.output.split(os.linesep) if line != '']:
-        available, used, percent, size, type, mountpoint = drive.split(" ")
-        mounts.append({"available": available,
-                       "used": used,
-                       "percent": percent,
-                       "size": size,
-                       "type": type,
-                       "mountpoint": mountpoint})
+      code, out, err = shell.call(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=int(timeout), quiet=True)
+      dfdata = out
+    except Exception as ex:
+      logger.warn("Checking disk usage failed: " + str(ex))
+      dfdata = ''
+
+    mounts = [cls._parse_df_line(line) for line in dfdata.splitlines() if line]
+    result_mounts = []
+    ignored_mounts = []
+
+    for mount in mounts:
+      if not mount:
+        continue
 
-    return mounts
+      """
+      We need to filter mounts by several criteria:
+       - the mounted device is not in the ignored list
+       - the mount is writable by the user the current process runs under
+       - the mount point is not a plain file (docker environment)
+       - neither the mount path nor any parent of it is in the blacklist
+      """
+      if mount["device"] not in cls.IGNORE_DEVICES and \
+          mount["mountpoint"].split("/")[0] not in cls.IGNORE_ROOT_MOUNTS and \
+        cls._chk_writable_mount(mount['mountpoint']) and \
+        not path_isfile(mount["mountpoint"]) and \
+        not cls._is_mount_blacklisted(blacklisted_mount_points, mount["mountpoint"]):
+
+        result_mounts.append(mount)
+      else:
+        ignored_mounts.append(mount)
+
+    if len(ignored_mounts) > 0:
+      ignore_list = [el["mountpoint"] for el in ignored_mounts]
+      logger.info("Some mount points were ignored: {0}".format(', '.join(ignore_list)))
+
+    return result_mounts
+
+  @classmethod
+  def _chk_writable_mount(cls, mount_point):
+    if os.geteuid() == 0:
+      return os.access(mount_point, os.W_OK)
+    else:
+      try:
+        # test if mount point is writable for current user
+        call_result = call(['test', '-w', mount_point],
+                           sudo=True,
+                           timeout=int(Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_DEFAULT) / 2,
+                           quiet=not logger.isEnabledFor(logging.DEBUG))
+        return call_result and call_result[0] == 0
+      except ExecuteTimeoutException:
+        logger.exception("Exception happened while checking mount {0}".format(mount_point))
+        return False
+      except Fail:
+        logger.exception("Exception happened while checking mount {0}".format(mount_point))
+        return False
+    
 
   def get(self):
     return self.hardware
 
-def main(argv=None):
-  hardware = Hardware()
-  print hardware.get()
+
+def main():
+  from resource_management.core.logger import Logger
+  Logger.initialize_logger()
+
+  print Hardware().get()
 
 if __name__ == '__main__':
   main()
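
Two pieces of the new filtering logic are worth calling out: df output is parsed positionally into a dict, and the blacklist match is done on path elements so that a blacklisted mount also covers everything mounted beneath it. Below is a standalone sketch of both, with illustrative function names and sample data mirroring _parse_df_line() and _is_mount_blacklisted().

def parse_df_line(line):
  # device fs_type size used available percent mountpoint -> dict, or None when malformed
  parts = line.split()
  if len(parts) != 7:
    return None
  titles = ["device", "type", "size", "used", "available", "percent", "mountpoint"]
  return dict(zip(titles, parts))

def is_mount_blacklisted(blacklist, mount_point):
  # True when mount_point equals a blacklist entry or lives underneath one;
  # the comparison is element-wise, so "/mnt/my_mount/sub" matches "/mnt/my_mount"
  # while "/mnt/my_mount2" does not
  if not blacklist or not mount_point:
    return False
  mount_elements = mount_point.split("/")
  for entry in blacklist:
    entry_elements = entry.split("/")
    if entry_elements == mount_elements[:len(entry_elements)]:
      return True
  return False

sample = "/dev/sdb1 ext4 224161316 12849696 199901804 7% /mnt/my_mount/sub_mount"
mount = parse_df_line(sample)
print is_mount_blacklisted(["/mnt/my_mount"], mount["mountpoint"])  # True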

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-agent/src/test/python/ambari_agent/TestController.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestController.py b/ambari-agent/src/test/python/ambari_agent/TestController.py
index 59b6276..59b41cd 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestController.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestController.py
@@ -186,7 +186,7 @@ class TestController(unittest.TestCase):
 
 
   @patch("subprocess.Popen")
-  @patch.object(Hardware, "_chk_mount", new = MagicMock(return_value=True))
+  @patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
   @patch.object(FacterLinux, "facterInfo", new = MagicMock(return_value={}))
   @patch.object(FacterLinux, "__init__", new = MagicMock(return_value = None))
   @patch("urllib2.build_opener")
@@ -228,7 +228,7 @@ class TestController(unittest.TestCase):
 
 
   @patch("subprocess.Popen")
-  @patch.object(Hardware, "_chk_mount", new = MagicMock(return_value=True))
+  @patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
   @patch.object(FacterLinux, "facterInfo", new = MagicMock(return_value={}))
   @patch.object(FacterLinux, "__init__", new = MagicMock(return_value = None))
   @patch("urllib2.build_opener")

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-agent/src/test/python/ambari_agent/TestHardware.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestHardware.py b/ambari-agent/src/test/python/ambari_agent/TestHardware.py
index 661dac6..551f828 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestHardware.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestHardware.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-'''
+"""
 Licensed to the Apache Software Foundation (ASF) under one
 or more contributor license agreements.  See the NOTICE file
 distributed with this work for additional information
@@ -16,36 +16,45 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-'''
+"""
+
 from ambari_agent import main
 main.MEMORY_LEAK_DEBUG_FILEPATH = "/tmp/memory_leak_debug.out"
 from unittest import TestCase
-from mock.mock import patch, MagicMock
+from mock.mock import patch, MagicMock, Mock
 import unittest
 import platform
 import socket
+import os
 from only_for_platform import not_for_platform, PLATFORM_WINDOWS
 from ambari_agent import hostname
 from ambari_agent.Hardware import Hardware
 from ambari_agent.AmbariConfig import AmbariConfig
 from ambari_agent.Facter import Facter, FacterLinux
 from ambari_commons import OSCheck
-from glob import glob
+from resource_management.core.logger import Logger
+
 
 @not_for_platform(PLATFORM_WINDOWS)
-@patch.object(platform,"linux_distribution", new = ('Suse','11','Final'))
-@patch.object(socket, "getfqdn", new = MagicMock(return_value = "ambari.apache.org"))
-@patch.object(socket, "gethostbyname", new = MagicMock(return_value = "192.168.1.1"))
-@patch.object(FacterLinux, "setDataIfConfigShortOutput", new = MagicMock(return_value ='''Iface   MTU Met    RX-OK RX-ERR RX-DRP RX-OVR    TX-OK TX-ERR TX-DRP TX-OVR Flg
+@patch.object(platform, "linux_distribution", new=MagicMock(return_value=('Suse', '11', 'Final')))
+@patch.object(socket, "getfqdn", new=MagicMock(return_value="ambari.apache.org"))
+@patch.object(socket, "gethostbyname", new=MagicMock(return_value="192.168.1.1"))
+@patch.object(FacterLinux, "setDataIfConfigShortOutput", new=MagicMock(return_value='''Iface   MTU Met    RX-OK RX-ERR RX-DRP RX-OVR    TX-OK TX-ERR TX-DRP TX-OVR Flg
 eth0   1500   0     9986      0      0      0     5490      0      0      0 BMRU
 eth1   1500   0        0      0      0      0        6      0      0      0 BMRU
 eth2   1500   0        0      0      0      0        6      0      0      0 BMRU
 lo    16436   0        2      0      0      0        2      0      0      0 LRU'''))
 class TestHardware(TestCase):
+
+  def setUp(self):
+    Logger.logger = MagicMock()
+
+  def tearDown(self):
+    Logger.logger = None
  
-  @patch.object(Hardware, "osdisks", new = MagicMock(return_value=[]))
-  @patch.object(Hardware, "_chk_mount", new = MagicMock(return_value=True))
-  @patch.object(FacterLinux, "get_ip_address_by_ifname", new = MagicMock(return_value=None))
+  @patch.object(Hardware, "osdisks", new=MagicMock(return_value=[]))
+  @patch.object(Hardware, "_chk_writable_mount", new=MagicMock(return_value=True))
+  @patch.object(FacterLinux, "get_ip_address_by_ifname", new=MagicMock(return_value=None))
   @patch.object(OSCheck, "get_os_type")
   @patch.object(OSCheck, "get_os_version")
   def test_build(self, get_os_version_mock, get_os_type_mock):
@@ -57,23 +66,60 @@ class TestHardware(TestCase):
     for dev_item in result['mounts']:
       self.assertTrue(dev_item['available'] >= 0)
       self.assertTrue(dev_item['used'] >= 0)
-      self.assertTrue(dev_item['percent'] != None)
-      self.assertTrue(dev_item['device'] != None)
-      self.assertTrue(dev_item['mountpoint'] != None)
-      self.assertTrue(dev_item['type'] != None)
+      self.assertTrue(dev_item['percent'] is not None)
+      self.assertTrue(dev_item['device'] is not None)
+      self.assertTrue(dev_item['mountpoint'] is not None)
+      self.assertTrue(dev_item['type'] is not None)
       self.assertTrue(dev_item['size'] > 0)
 
     for os_disk_item in osdisks:
       self.assertTrue(os_disk_item['available'] >= 0)
       self.assertTrue(os_disk_item['used'] >= 0)
-      self.assertTrue(os_disk_item['percent'] != None)
-      self.assertTrue(os_disk_item['device'] != None)
-      self.assertTrue(os_disk_item['mountpoint'] != None)
-      self.assertTrue(os_disk_item['type'] != None)
+      self.assertTrue(os_disk_item['percent'] is not None)
+      self.assertTrue(os_disk_item['device'] is not None)
+      self.assertTrue(os_disk_item['mountpoint'] is not None)
+      self.assertTrue(os_disk_item['type'] is not None)
       self.assertTrue(os_disk_item['size'] > 0)
 
     self.assertTrue(len(result['mounts']) == len(osdisks))
 
+  @patch.object(Hardware, "_chk_writable_mount")
+  @patch("ambari_agent.Hardware.path_isfile")
+  @patch("resource_management.core.shell.call")
+  def test_osdisks_parsing(self, shell_call_mock, isfile_mock, chk_writable_mount_mock):
+    df_output =\
+                """Filesystem                                                                                        Type  1024-blocks     Used Available Capacity Mounted on
+                /dev/mapper/docker-253:0-4980899-d45c264d37ab18c8ed14f890f4d59ac2b81e1c52919eb36a79419787209515f3 xfs      31447040  1282384  30164656       5% /
+                tmpfs                                                                                             tmpfs    32938336        4  32938332       1% /dev
+                tmpfs                                                                                             tmpfs    32938336        0  32938336       0% /sys/fs/cgroup
+                /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/resolv.conf
+                /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/hostname
+                /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/hosts
+                shm                                                                                               tmpfs       65536        0     65536       0% /dev/shm
+                /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /run/secrets
+                """
+
+    def isfile_side_effect(path):
+      assume_files = ["/etc/resolv.conf", "/etc/hostname", "/etc/hosts"]
+      return path in assume_files
+
+    def chk_writable_mount_side_effect(path):
+      assume_read_only = ["/run/secrets"]
+      return path not in assume_read_only
+
+    isfile_mock.side_effect = isfile_side_effect
+    chk_writable_mount_mock.side_effect = chk_writable_mount_side_effect
+    shell_call_mock.return_value = 0, df_output, ""
+
+    result = Hardware.osdisks()
+
+    self.assertEquals(1, len(result))
+
+    expected_mounts_left = ["/"]
+    mounts_left = [item["mountpoint"] for item in result]
+
+    self.assertEquals(expected_mounts_left, mounts_left)
+
   @patch.object(OSCheck, "get_os_type")
   @patch.object(OSCheck, "get_os_version")
   @patch("subprocess.Popen")
@@ -83,57 +129,66 @@ class TestHardware(TestCase):
     get_os_type_mock.return_value = "suse"
     get_os_version_mock.return_value = "11"
     Hardware.osdisks()
-    popen_mock.assert_called_with(['timeout', '10', "df","-kPT"], stdout=-1)
+
+    self.assertEquals(popen_mock.call_args[0][0], ['/bin/bash', '--login', '--noprofile', '-c', 'timeout 10 df -kPT'])
+
     config = AmbariConfig()
     Hardware.osdisks(config)
-    popen_mock.assert_called_with(['timeout', '10', "df","-kPT"], stdout=-1)
+    self.assertEquals(popen_mock.call_args[0][0], ['/bin/bash', '--login', '--noprofile', '-c', 'timeout 10 df -kPT'])
+
     config.add_section(AmbariConfig.AMBARI_PROPERTIES_CATEGORY)
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "true")
     Hardware.osdisks(config)
-    popen_mock.assert_called_with(['timeout', '10', "df","-kPT"], stdout=-1)
+    self.assertEquals(popen_mock.call_args[0][0], ['/bin/bash', '--login', '--noprofile', '-c', 'timeout 10 df -kPT'])
+
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "false")
     Hardware.osdisks(config)
-    popen_mock.assert_called_with(['timeout', '10', "df","-kPT", "-l"], stdout=-1)
+    self.assertEquals(popen_mock.call_args[0][0], ['/bin/bash', '--login', '--noprofile', '-c', 'timeout 10 df -kPT -l'])
+
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "0")
     Hardware.osdisks(config)
-    popen_mock.assert_called_with(['timeout', '10', "df","-kPT","-l"], stdout=-1)
+    self.assertEquals(popen_mock.call_args[0][0], ['/bin/bash', '--login', '--noprofile', '-c', 'timeout 10 df -kPT -l'])
+
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "1")
     Hardware.osdisks(config)
-    popen_mock.assert_called_with(["timeout","1","df","-kPT","-l"], stdout=-1)
+    self.assertEquals(popen_mock.call_args[0][0], ['/bin/bash', '--login', '--noprofile', '-c', 'timeout 1 df -kPT -l'])
+
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "2")
     Hardware.osdisks(config)
-    popen_mock.assert_called_with(["timeout","2","df","-kPT","-l"], stdout=-1)
-
-
-  def test_extractMountInfo(self):
-    outputLine = "device type size used available percent mountpoint"
-    result = Hardware.extractMountInfo(outputLine)
-
-    self.assertEquals(result['device'], 'device')
-    self.assertEquals(result['type'], 'type')
-    self.assertEquals(result['size'], 'size')
-    self.assertEquals(result['used'], 'used')
-    self.assertEquals(result['available'], 'available')
-    self.assertEquals(result['percent'], 'percent')
-    self.assertEquals(result['mountpoint'], 'mountpoint')
-
-    outputLine = ""
-    result = Hardware.extractMountInfo(outputLine)
-
-    self.assertEquals(result, None)
-
-    outputLine = "device type size used available percent"
-    result = Hardware.extractMountInfo(outputLine)
-
-    self.assertEquals(result, None)
-
-    outputLine = "device type size used available percent mountpoint info"
-    result = Hardware.extractMountInfo(outputLine)
-
-    self.assertEquals(result, None)
-
-  @patch.object(FacterLinux, "get_ip_address_by_ifname", new = MagicMock(return_value=None))
-  @patch.object(hostname,"hostname")
+    self.assertEquals(popen_mock.call_args[0][0], ['/bin/bash', '--login', '--noprofile', '-c', 'timeout 2 df -kPT -l'])
+
+  def test_parse_df_line(self):
+    df_line_sample = "device type size used available percent mountpoint"
+
+    samples = [
+      {
+        "sample": df_line_sample,
+        "expected": dict(zip(df_line_sample.split(), df_line_sample.split()))
+      },
+      {
+        "sample": "device type size used available percent",
+        "expected": None,
+      },
+      {
+        "sample": "device type size used available percent mountpoint info",
+        "expected": None,
+      },
+      {
+        "sample": "",
+        "expected": None
+      }
+    ]
+
+    for sample in samples:
+      result = Hardware._parse_df_line(sample["sample"])
+      self.assertEquals(result, sample["expected"], "Failed with sample: '{0}', expected: {1}, got: {2}".format(
+        sample["sample"],
+        sample["expected"],
+        result
+      ))
+
+  @patch.object(FacterLinux, "get_ip_address_by_ifname", new=MagicMock(return_value=None))
+  @patch.object(hostname, "hostname")
   @patch.object(FacterLinux, "getFqdn")
   @patch.object(OSCheck, "get_os_type")
   @patch.object(OSCheck, "get_os_version")
@@ -148,7 +203,7 @@ class TestHardware(TestCase):
     self.assertEquals(result['domain'], "apache.org")
     self.assertEquals(result['fqdn'], (result['hostname'] + '.' + result['domain']))
 
-  @patch.object(FacterLinux, "get_ip_address_by_ifname", new = MagicMock(return_value=None))
+  @patch.object(FacterLinux, "get_ip_address_by_ifname", new=MagicMock(return_value=None))
   @patch.object(FacterLinux, "setDataUpTimeOutput")
   @patch.object(OSCheck, "get_os_type")
   @patch.object(OSCheck, "get_os_version")
@@ -163,7 +218,7 @@ class TestHardware(TestCase):
     self.assertEquals(result['uptime_hours'], '73')
     self.assertEquals(result['uptime_days'], '3')
 
-  @patch.object(FacterLinux, "get_ip_address_by_ifname", new = MagicMock(return_value=None))
+  @patch.object(FacterLinux, "get_ip_address_by_ifname", new=MagicMock(return_value=None))
   @patch.object(FacterLinux, "setMemInfoOutput")
   @patch.object(OSCheck, "get_os_type")
   @patch.object(OSCheck, "get_os_version")
@@ -238,8 +293,7 @@ SwapFree:        1598676 kB
     self.assertTrue(get_ip_address_by_ifname_mock.called)
     self.assertEquals(result['netmask'], None)
 
-
-  @patch.object(FacterLinux, "get_ip_address_by_ifname", new = MagicMock(return_value=None))
+  @patch.object(FacterLinux, "get_ip_address_by_ifname", new=MagicMock(return_value=None))
   @patch.object(OSCheck, "get_os_type")
   @patch.object(OSCheck, "get_os_family")
   @patch.object(OSCheck, "get_os_version")
@@ -267,7 +321,6 @@ SwapFree:        1598676 kB
     self.assertEquals(result['operatingsystem'], 'some_type_of_os')
     self.assertEquals(result['osfamily'], 'My_new_family')
 
-
   @patch("os.path.exists")
   @patch("os.path.isdir")
   @patch("json.loads")
@@ -315,6 +368,66 @@ SwapFree:        1598676 kB
     self.assertEquals(2, json_mock.call_count)
     self.assertEquals('value', result['key'])
 
+  @patch.object(Hardware, "_chk_writable_mount")
+  @patch("ambari_agent.Hardware.path_isfile")
+  @patch("resource_management.core.shell.call")
+  def test_osdisks_blacklist(self, shell_call_mock, isfile_mock, chk_writable_mount_mock):
+    df_output = \
+      """Filesystem                                                                                        Type  1024-blocks     Used Available Capacity Mounted on
+      /dev/mapper/docker-253:0-4980899-d45c264d37ab18c8ed14f890f4d59ac2b81e1c52919eb36a79419787209515f3 xfs      31447040  1282384  30164656       5% /
+      tmpfs                                                                                             tmpfs    32938336        4  32938332       1% /dev
+      tmpfs                                                                                             tmpfs    32938336        0  32938336       0% /sys/fs/cgroup
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/resolv.conf
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/hostname
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/hosts
+      shm                                                                                               tmpfs       65536        0     65536       0% /dev/shm
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /run/secrets
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /mnt/blacklisted_mount
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /mnt/blacklisted_mount/sub-dir
+      """
+
+    def isfile_side_effect(path):
+      assume_files = ["/etc/resolv.conf", "/etc/hostname", "/etc/hosts"]
+      return path in assume_files
+
+    def chk_writable_mount_side_effect(path):
+      assume_read_only = ["/run/secrets"]
+      return path not in assume_read_only
+
+    isfile_mock.side_effect = isfile_side_effect
+    chk_writable_mount_mock.side_effect = chk_writable_mount_side_effect
+    shell_call_mock.return_value = 0, df_output, ""
+
+    config_dict = {
+      "agent": {
+        "ignore_mount_points": "/mnt/blacklisted_mount"
+      }
+    }
+
+    def conf_get(section, key, default=""):
+      if section in config_dict and key in config_dict[section]:
+        return config_dict[section][key]
+
+      return default
+
+    def has_option(section, key):
+      return section in config_dict and key in config_dict[section]
+
+    conf = Mock()
+    attr = {
+      'get.side_effect': conf_get,
+      'has_option.side_effect': has_option
+    }
+    conf.configure_mock(**attr)
+
+    result = Hardware.osdisks(conf)
+
+    self.assertEquals(1, len(result))
+
+    expected_mounts_left = ["/"]
+    mounts_left = [item["mountpoint"] for item in result]
+
+    self.assertEquals(expected_mounts_left, mounts_left)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-agent/src/test/python/ambari_agent/TestHeartbeat.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestHeartbeat.py b/ambari-agent/src/test/python/ambari_agent/TestHeartbeat.py
index 99ccb4c..19fad56 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestHeartbeat.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestHeartbeat.py
@@ -76,7 +76,7 @@ class TestHeartbeat(TestCase):
     self.assertEquals(not heartbeat.reports, True, "Heartbeat should not contain task in progress")
 
   @patch("subprocess.Popen")
-  @patch.object(Hardware, "_chk_mount", new = MagicMock(return_value=True))
+  @patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
   @patch.object(ActionQueue, "result")
   @patch.object(HostInfoLinux, "register")
   def test_no_mapping(self, register_mock, result_mock, Popen_mock):
@@ -202,7 +202,7 @@ class TestHeartbeat(TestCase):
     self.assertEquals(hb, expected)
 
   @patch("subprocess.Popen")
-  @patch.object(Hardware, "_chk_mount", new = MagicMock(return_value=True))
+  @patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
   @patch.object(HostInfoLinux, 'register')
   def test_heartbeat_no_host_check_cmd_in_queue(self, register_mock, Popen_mock):
     config = AmbariConfig.AmbariConfig()
@@ -231,7 +231,7 @@ class TestHeartbeat(TestCase):
 
 
   @patch("subprocess.Popen")
-  @patch.object(Hardware, "_chk_mount", new = MagicMock(return_value=True))
+  @patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
   @patch.object(HostInfoLinux, 'register')
   def test_heartbeat_host_check_no_cmd(self, register_mock, Popen_mock):
     config = AmbariConfig.AmbariConfig()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-agent/src/test/python/ambari_agent/TestRegistration.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestRegistration.py b/ambari-agent/src/test/python/ambari_agent/TestRegistration.py
index 0a70df2..f5e0288 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestRegistration.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestRegistration.py
@@ -34,7 +34,7 @@ from ambari_agent.Facter import FacterLinux
 class TestRegistration(TestCase):
 
   @patch("subprocess.Popen")
-  @patch.object(Hardware, "_chk_mount", new = MagicMock(return_value=True))
+  @patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
   @patch.object(FacterLinux, "facterInfo", new = MagicMock(return_value={}))
   @patch.object(FacterLinux, "__init__", new = MagicMock(return_value = None))
   @patch("ambari_commons.firewall.run_os_command")

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-server/src/main/resources/scripts/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/stack_advisor.py b/ambari-server/src/main/resources/scripts/stack_advisor.py
index 5926c39..abfab87 100755
--- a/ambari-server/src/main/resources/scripts/stack_advisor.py
+++ b/ambari-server/src/main/resources/scripts/stack_advisor.py
@@ -70,13 +70,11 @@ def main(argv=None):
   if len(args) < 3:
     sys.stderr.write(USAGE)
     sys.exit(2)
-    pass
 
   action = args[0]
   if action not in ALL_ACTIONS:
     sys.stderr.write(USAGE)
     sys.exit(2)
-    pass
 
   hostsFile = args[1]
   servicesFile = args[2]
@@ -89,6 +87,7 @@ def main(argv=None):
   stackName = services["Versions"]["stack_name"]
   stackVersion = services["Versions"]["stack_version"]
   parentVersions = []
+
   if "stack_hierarchy" in services["Versions"]:
     parentVersions = services["Versions"]["stack_hierarchy"]["stack_versions"]
 
@@ -96,8 +95,9 @@ def main(argv=None):
 
   # Perform action
   actionDir = os.path.realpath(os.path.dirname(args[1]))
-  result = {}
-  result_file = "non_valid_result_file.json"
+
+  # filter
+  hosts = stackAdvisor.filterHostMounts(hosts, services)
 
   if action == RECOMMEND_COMPONENT_LAYOUT_ACTION:
     result = stackAdvisor.recommendComponentLayout(services, hosts)
@@ -111,12 +111,11 @@ def main(argv=None):
   elif action == RECOMMEND_CONFIGURATION_DEPENDENCIES:
     result = stackAdvisor.recommendConfigurationDependencies(services, hosts)
     result_file = os.path.join(actionDir, "configurations.json")
-  else: # action == VALIDATE_CONFIGURATIONS
+  else:  # action == VALIDATE_CONFIGURATIONS
     result = stackAdvisor.validateConfigurations(services, hosts)
     result_file = os.path.join(actionDir, "configurations-validation.json")
 
   dumpJson(result, result_file)
-  pass
 
 
 def instantiateStackAdvisor(stackName, stackVersion, parentVersions):

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index 81cb175..8070ade 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -229,4 +229,14 @@ gpgcheck=0</value>
     <description>For properties handled by handle_mounted_dirs this will make Ambari </description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>agent_mounts_ignore_list</name>
+    <value/>
+    <description>Comma separated list of the mounts which would be ignored by Ambari during property values suggestion by Stack Advisor</description>
+    <on-ambari-upgrade add="false"/>
+    <value-attributes>
+        <visible>true</visible>
+        <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
 </configuration>
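
The new agent_mounts_ignore_list property feeds the stack advisor rather than the agent: filterHostMounts(), added to stacks/stack_advisor.py later in this patch, merges it with a built-in ignore list before trimming each host's disk_info. A small illustration of that merge, with made-up disk_info data:

banned_filesystems = ["devtmpfs", "tmpfs", "vboxsf", "cdfs"]
banned_mount_points = ["/etc/resolv.conf", "/etc/hostname", "/boot", "/mnt", "/tmp", "/run/secrets"]

agent_mounts_ignore_list = "/mnt/custom1, /mnt/custom2"  # cluster-env property value
ignore_list = [x.strip() for x in agent_mounts_ignore_list.strip().split(",")]
ignore_list.extend(banned_mount_points)

disk_info = [
  {"mountpoint": "/", "type": "ext4"},
  {"mountpoint": "/mnt/custom1", "type": "ext4"},
  {"mountpoint": "/dev", "type": "devtmpfs"},
]
kept = [disk for disk in disk_info
        if disk["mountpoint"] not in ignore_list and disk["type"].lower() not in banned_filesystems]
print kept  # only the "/" entry survives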

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index bfd648a..928fa92 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -99,9 +99,23 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       "HBASE": self.recommendHbaseConfigurations,
       "STORM": self.recommendStormConfigurations,
       "AMBARI_METRICS": self.recommendAmsConfigurations,
-      "RANGER": self.recommendRangerConfigurations
+      "RANGER": self.recommendRangerConfigurations,
+      "ZOOKEEPER": self.recommendZookeeperConfigurations,
+      "OOZIE": self.recommendOozieConfigurations
     }
 
+  def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
+    oozie_mount_properties = [
+      ("oozie_data_dir", "OOZIE_SERVER", "/hadoop/oozie/data", "single"),
+    ]
+    self.updateMountProperties("oozie-env", oozie_mount_properties, configurations, services, hosts)
+
+  def recommendZookeeperConfigurations(self, configurations, clusterData, services, hosts):
+    zk_mount_properties = [
+      ("dataDir", "ZOOKEEPER_SERVER", "/hadoop/zookeeper", "single"),
+    ]
+    self.updateMountProperties("zoo.cfg", zk_mount_properties, configurations, services, hosts)
+
   def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
     putYarnProperty = self.putProperty(configurations, "yarn-site", services)
     putYarnPropertyAttribute = self.putPropertyAttribute(configurations, "yarn-site")
@@ -114,6 +128,15 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
     putYarnEnvProperty('min_user_id', self.get_system_min_uid())
 
+    yarn_mount_properties = [
+      ("yarn.nodemanager.local-dirs", "NODEMANAGER", "/hadoop/yarn/local", "multi"),
+      ("yarn.nodemanager.log-dirs", "NODEMANAGER", "/hadoop/yarn/log", "multi"),
+      ("yarn.timeline-service.leveldb-timeline-store.path", "APP_TIMELINE_SERVER", "/hadoop/yarn/timeline", "single"),
+      ("yarn.timeline-service.leveldb-state-store.path", "APP_TIMELINE_SERVER", "/hadoop/yarn/timeline", "single")
+    ]
+
+    self.updateMountProperties("yarn-site", yarn_mount_properties, configurations, services, hosts)
+
     sc_queue_name = self.recommendYarnQueue(services, "yarn-env", "service_check.queue.name")
     if sc_queue_name is not None:
       putYarnEnvProperty("service_check.queue.name", sc_queue_name)
@@ -144,6 +167,13 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
     putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
     putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
+
+    mapred_mounts = [
+      ("mapred.local.dir", ["TASKTRACKER", "NODEMANAGER"], "/hadoop/mapred", "multi")
+    ]
+
+    self.updateMountProperties("mapred-site", mapred_mounts, configurations, services, hosts)
+
     mr_queue = self.recommendYarnQueue(services, "mapred-site", "mapreduce.job.queuename")
     if mr_queue is not None:
       putMapredProperty("mapreduce.job.queuename", mr_queue)
@@ -321,12 +351,15 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       if len(namenodes.split(',')) > 1:
         putHDFSSitePropertyAttributes("dfs.namenode.rpc-address", "delete", "true")
 
-    #Initialize default 'dfs.datanode.data.dir' if needed
-    if (not hdfsSiteProperties) or ('dfs.datanode.data.dir' not in hdfsSiteProperties):
-      dataDirs = '/hadoop/hdfs/data'
-      putHDFSSiteProperty('dfs.datanode.data.dir', dataDirs)
-    else:
-      dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
+    hdfs_mount_properties = [
+      ("dfs.datanode.data.dir", "DATANODE", "/hadoop/hdfs/data", "multi"),
+      ("dfs.namenode.name.dir", "DATANODE", "/hadoop/hdfs/namenode", "multi"),
+      ("dfs.namenode.checkpoint.dir", "SECONDARY_NAMENODE", "/hadoop/hdfs/namesecondary", "single")
+    ]
+
+    self.updateMountProperties("hdfs-site", hdfs_mount_properties, configurations, services, hosts)
+
+    dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
 
     # dfs.datanode.du.reserved should be set to 10-15% of volume size
     # For each host selects maximum size of the volume. Then gets minimum for all hosts.

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
index 9678dc1..17225d0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
@@ -24,12 +24,30 @@ class HDP21StackAdvisor(HDP206StackAdvisor):
     childRecommendConfDict = {
       "OOZIE": self.recommendOozieConfigurations,
       "HIVE": self.recommendHiveConfigurations,
-      "TEZ": self.recommendTezConfigurations
+      "TEZ": self.recommendTezConfigurations,
+      "STORM": self.recommendStormConfigurations,
+      "FALCON": self.recommendFalconConfigurations
     }
     parentRecommendConfDict.update(childRecommendConfDict)
     return parentRecommendConfDict
 
+  def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
+    storm_mounts = [
+      ("storm.local.dir", ["NODEMANAGER", "NIMBUS"], "/hadoop/storm", "single")
+    ]
+
+    self.updateMountProperties("storm-site", storm_mounts, configurations, services, hosts)
+
+  def recommendFalconConfigurations(self, configurations, clusterData, services, hosts):
+    falcon_mounts = [
+      ("*.falcon.graph.storage.directory", "FALCON_SERVER", "/hadoop/falcon/data/lineage/graphdb", "single")
+    ]
+
+    self.updateMountProperties("falcon-startup.properties", falcon_mounts, configurations, services, hosts)
+
   def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP21StackAdvisor, self).recommendOozieConfigurations(configurations, clusterData, services, hosts)
+
     oozieSiteProperties = getSiteProperties(services['configurations'], 'oozie-site')
     oozieEnvProperties = getSiteProperties(services['configurations'], 'oozie-env')
     putOozieProperty = self.putProperty(configurations, "oozie-site", services)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 4854514..4f0a9d0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -44,10 +44,17 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       "RANGER": self.recommendRangerConfigurations,
       "LOGSEARCH" : self.recommendLogsearchConfigurations,
       "SPARK": self.recommendSparkConfigurations,
+      "KAFKA": self.recommendKafkaConfigurations,
     }
     parentRecommendConfDict.update(childRecommendConfDict)
     return parentRecommendConfDict
 
+  def recommendKafkaConfigurations(self, configurations, clusterData, services, hosts):
+    kafka_mounts = [
+      ("log.dirs", "KAFKA_BROKER", "/kafka-logs", "multi")
+    ]
+
+    self.updateMountProperties("kafka-broker", kafka_mounts, configurations, services, hosts)
 
   def recommendSparkConfigurations(self, configurations, clusterData, services, hosts):
     """

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index f6191f8..8148379 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -972,6 +972,18 @@ class DefaultStackAdvisor(StackAdvisor):
       return None
     return siteConfig.get("properties")
 
+  def getServicesSiteProperties(self, services, siteName):
+    if not services:
+      return None
+
+    configurations = services.get("configurations")
+    if not configurations:
+      return None
+    siteConfig = configurations.get(siteName)
+    if siteConfig is None:
+      return None
+    return siteConfig.get("properties")
+
   def putProperty(self, config, configType, services=None):
     userConfigs = {}
     changedConfigs = []
@@ -1040,14 +1052,27 @@ class DefaultStackAdvisor(StackAdvisor):
       config[configType]["property_attributes"][key][attribute] = attributeValue if isinstance(attributeValue, list) else str(attributeValue)
     return appendPropertyAttribute
 
-
-  """
-  Returns the hosts which are running the given component.
-  """
   def getHosts(self, componentsList, componentName):
+    """
+    Returns the hosts which are running the given component.
+    """
     hostNamesList = [component["hostnames"] for component in componentsList if component["component_name"] == componentName]
     return hostNamesList[0] if len(hostNamesList) > 0 else []
 
+  def getMountPoints(self, hosts):
+    """
+    Return list of mounts available on the hosts
+
+    :type hosts dict
+    """
+    mount_points = []
+
+    for item in hosts["items"]:
+      if "disk_info" in item["Hosts"]:
+        mount_points.append(item["Hosts"]["disk_info"])
+
+    return mount_points
+
   def isSecurityEnabled(self, services):
     """
     Determines if security is enabled by testing the value of cluster-env/security enabled.
@@ -1084,3 +1109,179 @@ class DefaultStackAdvisor(StackAdvisor):
 
   def getServiceNames(self, services):
     return [service["StackServices"]["service_name"] for service in services["services"]]
+
+  def filterHostMounts(self, hosts, services):
+    """
+    Filter mounts on the hosts using agent_mounts_ignore_list, excluding every disk_info record whose
+     mount-point is mentioned in agent_mounts_ignore_list.
+
+    This function updates hosts dictionary
+
+    Example:
+
+      agent_mounts_ignore_list : "/run/secrets"
+
+      Hosts record :
+
+       "disk_info" : [
+          {
+              ...
+            "mountpoint" : "/"
+          },
+          {
+              ...
+            "mountpoint" : "/run/secrets"
+          }
+        ]
+
+      Result would be :
+
+        "disk_info" : [
+          {
+              ...
+            "mountpoint" : "/"
+          }
+        ]
+
+    :type hosts dict
+    :type services dict
+    """
+    if not services or "items" not in hosts:
+      return hosts
+
+    banned_filesystems = ["devtmpfs", "tmpfs", "vboxsf", "cdfs"]
+    banned_mount_points = ["/etc/resolv.conf", "/etc/hostname", "/boot", "/mnt", "/tmp", "/run/secrets"]
+
+    cluster_env = self.getServicesSiteProperties(services, "cluster-env")
+    ignore_list = []
+
+    if cluster_env and "agent_mounts_ignore_list" in cluster_env and cluster_env["agent_mounts_ignore_list"].strip():
+      ignore_list = [x.strip() for x in cluster_env["agent_mounts_ignore_list"].strip().split(",")]
+
+    ignore_list.extend(banned_mount_points)
+
+    for host in hosts["items"]:
+      if "Hosts" not in host or "disk_info" not in host["Hosts"]:
+        continue
+
+      host = host["Hosts"]
+      disk_info = []
+
+      for disk in host["disk_info"]:
+        if disk["mountpoint"] not in ignore_list\
+          and disk["type"].lower() not in banned_filesystems:
+          disk_info.append(disk)
+
+      host["disk_info"] = disk_info
+
+    return hosts
+
+  def __getSameHostMounts(self, hosts):
+    """
+    Return the sorted list of mount points that are present on all hosts
+
+    :type hosts dict
+    :rtype list
+    """
+    if not hosts:
+      return None
+
+    hostMounts = self.getMountPoints(hosts)
+    mounts = []
+    for m in hostMounts:
+      host_mounts = set([item["mountpoint"] for item in m])
+      mounts = host_mounts if not mounts else mounts & host_mounts
+
+    return sorted(mounts)
+
+  def getMountPathVariations(self, initial_value, component_name, services, hosts):
+    """
+    Recommends the best-fitting mounts by prefixing the given path with each of them.
+
+    :return list of paths prefixed with the properly selected mounts; an empty list is returned
+     if no recommendation is possible
+
+    :type initial_value str
+    :type component_name str
+    :type services dict
+    :type hosts dict
+    :rtype list
+    """
+    available_mounts = []
+
+    if not initial_value:
+      return available_mounts
+
+    mounts = self.__getSameHostMounts(hosts)
+    sep = "/"
+
+    if not mounts:
+      return available_mounts
+
+    for mount in mounts:
+      new_mount = initial_value if mount == "/" else os.path.join(mount + sep, initial_value.lstrip(sep))
+      if new_mount not in available_mounts:
+        available_mounts.append(new_mount)
+
+    # do not transform the list after it has been filled, as that would change the item order
+    return available_mounts
+
+  def getMountPathVariation(self, initial_value, component_name, services, hosts):
+    """
+    Recommends the single best-fitting mount by prefixing the initial path with it.
+
+    :return single-element list with the selected path; an empty list is returned
+     if no recommendation is possible
+
+    :type initial_value str
+    :type component_name str
+    :type services dict
+    :type hosts dict
+    :rtype list
+    """
+    try:
+      return [self.getMountPathVariations(initial_value, component_name, services, hosts)[0]]
+    except IndexError:
+      return []
+
+  def updateMountProperties(self, siteConfig, propertyDefinitions, configurations,  services, hosts):
+    """
+    Update properties according to recommendations for available mount points
+
+    propertyDefinitions is a list of tuples: (property name, component name, initial value, recommendation type)
+
+     Where,
+
+       property name - name of the property
+       component name - name of the component that owns this property
+       initial value - initial path
+       recommendation type - "multi" or "single". This is the recommendation strategy: either spread
+        the path across all suitable mounts or use only a single one
+
+    :type propertyDefinitions list
+    :type siteConfig str
+    :type configurations dict
+    :type services dict
+    :type hosts dict
+    """
+
+    props = self.getServicesSiteProperties(services, siteConfig)
+    put_f = self.putProperty(configurations, siteConfig, services)
+
+    for prop_item in propertyDefinitions:
+      name, component, default_value, rc_type = prop_item
+      recommendation = None
+
+      if props is None or name not in props:
+        if rc_type == "multi":
+          recommendation = self.getMountPathVariations(default_value, component, services, hosts)
+        else:
+          recommendation = self.getMountPathVariation(default_value, component, services, hosts)
+      elif props and name in props and props[name] == default_value:
+        if rc_type == "multi":
+          recommendation = self.getMountPathVariations(default_value, component, services, hosts)
+        else:
+          recommendation = self.getMountPathVariation(default_value, component, services, hosts)
+
+      if recommendation:
+        put_f(name, ",".join(recommendation))

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index a70922f..927f0de 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -1191,8 +1191,10 @@ class TestHDP206StackAdvisor(TestCase):
                   {'properties':
                      {'falcon_user': 'falcon'}},
                 'hdfs-site':
-                  {'properties': 
+                  {'properties':
                      {'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+                      'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+                      'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
                       'dfs.datanode.du.reserved': '10240000000'}},
                 'hive-env':
                   {'properties':
@@ -1326,6 +1328,8 @@ class TestHDP206StackAdvisor(TestCase):
                 'hdfs-site':
                   {'properties':
                      {'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+                      'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+                      'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
                       'dfs.datanode.du.reserved': '10240000000'}},
                 'hive-env':
                   {'properties':
@@ -1459,8 +1463,10 @@ class TestHDP206StackAdvisor(TestCase):
                      {'hive_user': 'hive',
                       'webhcat_user': 'webhcat'}},
                 'hdfs-site':
-                  {'properties': 
+                  {'properties':
                      {'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+                      'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+                      'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
                       'dfs.datanode.du.reserved': '10240000000'}},
                 'hadoop-env':
                   {'properties':
@@ -1480,10 +1486,12 @@ class TestHDP206StackAdvisor(TestCase):
 
     expected["hdfs-site"] = {
       'properties': {
-        'dfs.datanode.data.dir': '/hadoop/hdfs/data',
         'dfs.datanode.du.reserved': '10240000000',
         'dfs.internal.nameservices': 'mycluster',
-        'dfs.ha.namenodes.mycluster': 'nn1,nn2'
+        'dfs.ha.namenodes.mycluster': 'nn1,nn2',
+        'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+        'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+        'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
       },
       'property_attributes': {
         'dfs.namenode.rpc-address': {

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
index 7835262..f9fb1f5 100644
--- a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
@@ -487,6 +487,8 @@ class TestHDP21StackAdvisor(TestCase):
       "hdfs-site": {
         "properties": {
           'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+          'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+          'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
           'dfs.datanode.du.reserved': '10240000000'
         }
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index 388fc3b..01ab62f 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -3026,7 +3026,9 @@ class TestHDP22StackAdvisor(TestCase):
           'dfs.namenode.safemode.threshold-pct': '1.000',
           'dfs.datanode.failed.volumes.tolerated': '1',
           'dfs.namenode.handler.count': '25',
-          'dfs.datanode.data.dir': '/path/1,/path/2,/path/3,/path/4'
+          'dfs.datanode.data.dir': '/path/1,/path/2,/path/3,/path/4',
+          'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+          'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary'
         },
         'property_attributes': {
           'dfs.datanode.failed.volumes.tolerated': {'maximum': '4'},
@@ -3639,12 +3641,9 @@ class TestHDP22StackAdvisor(TestCase):
             "cpu_count" : 6,
             "total_mem" : 50331648,
             "disk_info" : [
-              {"mountpoint" : "/"},
-              {"mountpoint" : "/dev/shm"},
-              {"mountpoint" : "/vagrant"},
-              {"mountpoint" : "/"},
-              {"mountpoint" : "/dev/shm"},
-              {"mountpoint" : "/vagrant"}
+              {"mountpoint" : "/", "type": "ext3"},
+              {"mountpoint" : "/dev/shm", "type": "tmpfs"},
+              {"mountpoint" : "/vagrant", "type": "vboxsf"}
             ],
             "public_host_name" : "c6401.ambari.apache.org",
             "host_name" : "c6401.ambari.apache.org"
@@ -3694,15 +3693,19 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor",
           "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "/cgroup",
           "yarn.nodemanager.container-executor.cgroups.mount": "true",
-          "yarn.nodemanager.resource.memory-mb": "39424",
-          "yarn.scheduler.minimum-allocation-mb": "3584",
+          "yarn.nodemanager.resource.memory-mb": "43008",
+          "yarn.scheduler.minimum-allocation-mb": "14336",
           "yarn.scheduler.maximum-allocation-vcores": "4",
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.nodemanager.resource.cpu-vcores": "4",
           "yarn.nodemanager.container-executor.cgroups.hierarchy": " /yarn",
-          "yarn.scheduler.maximum-allocation-mb": "39424",
+          "yarn.scheduler.maximum-allocation-mb": "43008",
           "yarn.nodemanager.container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
-          "hadoop.registry.rm.enabled": "false"
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
+          "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
+          "yarn.nodemanager.local-dirs": "/hadoop/yarn/local",
+          "yarn.nodemanager.log-dirs": "/hadoop/yarn/log"
         },
         "property_attributes": {
           "yarn.scheduler.minimum-allocation-vcores": {
@@ -3715,18 +3718,19 @@ class TestHDP22StackAdvisor(TestCase):
             "maximum": "49152"
           },
           "yarn.scheduler.minimum-allocation-mb": {
-            "maximum": "39424"
+            "maximum": "43008"
           },
           "yarn.nodemanager.resource.cpu-vcores": {
             "maximum": "12"
           },
           "yarn.scheduler.maximum-allocation-mb": {
-            "maximum": "39424"
+            "maximum": "43008"
           }
         }
       }
     }
 
+    hosts = self.stackAdvisor.filterHostMounts(hosts, services)
     clusterData = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
     self.assertEquals(clusterData['hbaseRam'], 8)
 
@@ -3750,15 +3754,19 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "/cgroup",
           "yarn.nodemanager.linux-container-executor.group": "hadoop",
           "yarn.nodemanager.container-executor.cgroups.mount": "true",
-          "yarn.nodemanager.resource.memory-mb": "39424",
-          "yarn.scheduler.minimum-allocation-mb": "3584",
+          "yarn.nodemanager.resource.memory-mb": "43008",
+          "yarn.scheduler.minimum-allocation-mb": "14336",
           "yarn.scheduler.maximum-allocation-vcores": "4",
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.nodemanager.resource.cpu-vcores": "4",
           "yarn.nodemanager.container-executor.cgroups.hierarchy": " /yarn",
-          "yarn.scheduler.maximum-allocation-mb": "39424",
+          "yarn.scheduler.maximum-allocation-mb": "43008",
           "yarn.nodemanager.container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
-          "hadoop.registry.rm.enabled": "false"
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
+          "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
+          "yarn.nodemanager.local-dirs": "/hadoop/yarn/local",
+          "yarn.nodemanager.log-dirs": "/hadoop/yarn/log"
         },
         "property_attributes": {
           "yarn.nodemanager.container-executor.cgroups.mount": {
@@ -3780,13 +3788,13 @@ class TestHDP22StackAdvisor(TestCase):
             "maximum": "49152"
           },
           "yarn.scheduler.minimum-allocation-mb": {
-            "maximum": "39424"
+            "maximum": "43008"
           },
           "yarn.nodemanager.resource.cpu-vcores": {
             "maximum": "12"
           },
           "yarn.scheduler.maximum-allocation-mb": {
-            "maximum": "39424"
+            "maximum": "43008"
           },
           "yarn.nodemanager.container-executor.resources-handler.class": {
             "delete": "true"

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-server/src/test/python/stacks/test_stack_adviser.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/test_stack_adviser.py b/ambari-server/src/test/python/stacks/test_stack_adviser.py
new file mode 100644
index 0000000..8146a0c
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/test_stack_adviser.py
@@ -0,0 +1,239 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+from unittest import TestCase
+
+
+class TestBasicAdvisor(TestCase):
+  def setUp(self):
+    import imp
+    self.maxDiff = None
+    self.testDirectory = os.path.dirname(os.path.abspath(__file__))
+    stackAdvisorPath = os.path.abspath(os.path.join(self.testDirectory, '../../../main/resources/stacks/stack_advisor.py'))
+
+    default_sa_classname = 'DefaultStackAdvisor'
+
+    with open(stackAdvisorPath, 'rb') as fp:
+      stack_advisor_impl = imp.load_module('stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
+
+    clazz = getattr(stack_advisor_impl, default_sa_classname)
+    self.stackAdvisor = clazz()
+
+  def test_filterHostMounts(self):
+
+    filtered_mount = "/data"
+
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/", "type": "ext3"},
+              {"mountpoint": "/dev/shm", "type": "ext3"},
+              {"mountpoint": "/vagrant", "type": "vboxsf"},
+              {"mountpoint": "/dev/shm", "type": "ext3"},
+              {"mountpoint": "/vagrant", "type": "ext3"},
+              {"mountpoint": filtered_mount, "type": "ext3"},
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        },
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/", "type": "ext3"},
+              {"mountpoint": "/dev/shm1", "type": "ext3"},
+              {"mountpoint": "/vagrant1", "type": "ext3"},
+              {"mountpoint": filtered_mount, "type": "ext3"}
+            ],
+            "public_host_name": "c6402.ambari.apache.org",
+            "host_name": "c6402.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+    services = {
+      "Versions": {
+        "parent_stack_version": "2.5",
+        "stack_name": "HDP",
+        "stack_version": "2.6",
+        "stack_hierarchy": {
+          "stack_name": "HDP",
+          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+        }
+      },
+      "services": [
+      ],
+      "configurations": {
+        "cluster-env": {
+          "properties": {
+            "agent_mounts_ignore_list": filtered_mount
+          }
+        }
+      }
+    }
+
+    filtered_hosts = self.stackAdvisor.filterHostMounts(hosts, services)
+
+    for host in filtered_hosts["items"]:
+      mount_points = [disk["mountpoint"] for disk in host["Hosts"]["disk_info"]]
+      self.assertEquals(False, filtered_mount in mount_points)
+
+  def test_getMountPathVariations(self):
+
+    filtered_mount = "/data"
+
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/", "type": "ext3"},
+              {"mountpoint": "/dev/shm", "type": "ext3"},
+              {"mountpoint": "/vagrant", "type": "vboxsf"},
+              {"mountpoint": "/dev/shm", "type": "ext3"},
+              {"mountpoint": "/vagrant", "type": "ext3"},
+              {"mountpoint": filtered_mount, "type": "ext3"},
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        },
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/", "type": "ext3"},
+              {"mountpoint": "/dev/shm1", "type": "ext3"},
+              {"mountpoint": "/vagrant1", "type": "ext3"},
+              {"mountpoint": filtered_mount, "type": "ext3"}
+            ],
+            "public_host_name": "c6402.ambari.apache.org",
+            "host_name": "c6402.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+    services = {
+      "Versions": {
+        "parent_stack_version": "2.5",
+        "stack_name": "HDP",
+        "stack_version": "2.6",
+        "stack_hierarchy": {
+          "stack_name": "HDP",
+          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+        }
+      },
+      "services": [
+      ],
+      "configurations": {
+        "cluster-env": {
+          "properties": {
+            "agent_mounts_ignore_list": filtered_mount
+          }
+        }
+      }
+    }
+
+    hosts = self.stackAdvisor.filterHostMounts(hosts, services)
+    avail_mounts = self.stackAdvisor.getMountPathVariations("/test/folder", "DATANODE", services, hosts)
+
+    self.assertEquals(True, avail_mounts is not None)
+    self.assertEquals(1, len(avail_mounts))
+    self.assertEquals("/test/folder", avail_mounts[0])
+
+  def test_updateMountProperties(self):
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/", "type": "ext3"},
+              {"mountpoint": "/dev/shm", "type": "ext3"},
+              {"mountpoint": "/vagrant", "type": "vboxsf"},
+              {"mountpoint": "/dev/shm", "type": "ext3"},
+              {"mountpoint": "/vagrant", "type": "ext3"},
+              {"mountpoint": "/data", "type": "ext3"},
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        },
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/", "type": "ext3"},
+              {"mountpoint": "/dev/shm1", "type": "ext3"},
+              {"mountpoint": "/vagrant", "type": "vboxsf"},
+              {"mountpoint": "/data", "type": "ext3"}
+            ],
+            "public_host_name": "c6402.ambari.apache.org",
+            "host_name": "c6402.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+    services = {
+      "Versions": {
+        "parent_stack_version": "2.5",
+        "stack_name": "HDP",
+        "stack_version": "2.6",
+        "stack_hierarchy": {
+          "stack_name": "HDP",
+          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+        }
+      },
+      "services": [
+      ],
+      "configurations": {
+        "cluster-env": {
+          "properties": {
+            "agent_mounts_ignore_list": ""
+          }
+        },
+        "some-site": {
+          "path_prop": "/test"
+        }
+      }
+    }
+
+    pathProperties = [
+      ("path_prop", "DATANODE", "/test", "multi"),
+    ]
+
+    configurations = {}
+    hosts = self.stackAdvisor.filterHostMounts(hosts, services)
+
+    self.stackAdvisor.updateMountProperties("some-site", pathProperties, configurations, services, hosts)
+
+    self.assertEquals("/test,/data/test", configurations["some-site"]["properties"]["path_prop"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_multi_server_tasks.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_multi_server_tasks.xml b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_multi_server_tasks.xml
new file mode 100644
index 0000000..9dac3c4
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_multi_server_tasks.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="upgrade-pack.xsd">
+  <target>2.2.*.*</target>
+  <target-stack>HDP-2.2.0</target-stack>
+  <type>ROLLING</type>
+  <prerequisite-checks />
+  
+  <order>
+    <group xsi:type="colocated" name="ZOOKEEPER" title="Zookeeper">
+      <skippable>true</skippable>
+      <allow-retry>false</allow-retry>
+      <service name="ZOOKEEPER">
+        <component>ZOOKEEPER_SERVER</component>
+      </service>
+      
+      <batch>
+        <percent>20</percent>
+        <message>Please run additional tests on {{components}}</message>
+      </batch>
+      
+    </group>
+    
+    <group name="CLIENTS" title="Zookeeper Clients">
+      <skippable>true</skippable>
+      <allow-retry>false</allow-retry>
+      <service name="ZOOKEEPER">
+        <component>ZOOKEEPER_CLIENT</component>
+      </service>
+    </group>
+    
+  </order>
+  
+  <processing>
+    <service name="ZOOKEEPER">
+      <component name="ZOOKEEPER_SERVER">
+        <pre-upgrade>
+          <task xsi:type="manual">
+            <summary>SUMMARY OF PREPARE</summary>
+            <message>This is a manual task with a placeholder of {{foo/bar}}</message>
+          </task>
+          
+          <task xsi:type="configure" id="hdp_2_1_1_zookeeper_new_config_type" />
+          <task xsi:type="configure" id="hdp_2_1_1_zookeeper_new_config_type" />
+          
+         
+          <task xsi:type="execute">
+            <command>ls</command>
+          </task>
+         
+        </pre-upgrade>
+        <pre-downgrade copy-upgrade="true" />
+        <upgrade>
+          <task xsi:type="restart-task" />
+        </upgrade>
+      </component>
+      
+      <component name="ZOOKEEPER_CLIENT">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hdp_2_1_1_zookeeper_new_config_type" />
+          <task xsi:type="server_action" summary="Verifying LZO codec path for mapreduce" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath"/>
+        </pre-upgrade>
+        
+        <pre-downgrade />
+        
+        <upgrade>
+          <task xsi:type="restart-task" />
+        </upgrade>
+      </component>
+      
+    </service>
+  </processing>
+</upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-web/app/mixins.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins.js b/ambari-web/app/mixins.js
index b4fe115..7a78f6d 100644
--- a/ambari-web/app/mixins.js
+++ b/ambari-web/app/mixins.js
@@ -65,4 +65,3 @@ require('mixins/common/widgets/widget_mixin');
 require('mixins/common/widgets/widget_section');
 require('mixins/unit_convert/base_unit_convert_mixin');
 require('mixins/unit_convert/convert_unit_widget_view_mixin');
-require('utils/configs/mount_points_based_initializer_mixin');

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ac59c3f/ambari-web/app/utils/configs/config_initializer.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/config_initializer.js b/ambari-web/app/utils/configs/config_initializer.js
index b4c47f2..384ce1c 100644
--- a/ambari-web/app/utils/configs/config_initializer.js
+++ b/ambari-web/app/utils/configs/config_initializer.js
@@ -20,7 +20,6 @@ var App = require('app');
 var stringUtils = require('utils/string_utils');
 
 require('utils/configs/config_initializer_class');
-require('utils/configs/mount_points_based_initializer_mixin');
 require('utils/configs/hosts_based_initializer_mixin');
 
 /**
@@ -53,7 +52,7 @@ function getZKBasedConfig() {
  *
  * @instance ConfigInitializer
  */
-App.ConfigInitializer = App.ConfigInitializerClass.create(App.MountPointsBasedInitializerMixin, App.HostsBasedInitializerMixin, {
+App.ConfigInitializer = App.ConfigInitializerClass.create(App.HostsBasedInitializerMixin, {
 
   initializers: function() {
     return {
@@ -111,26 +110,7 @@ App.ConfigInitializer = App.ConfigInitializerClass.create(App.MountPointsBasedIn
       'templeton.zookeeper.hosts': getZKBasedConfig(),
       'hadoop.registry.zk.quorum': getZKBasedConfig(),
       'hive.cluster.delegation.token.store.zookeeper.connectString': getZKBasedConfig(),
-      'instance.zookeeper.host': getZKBasedConfig(),
-
-      'dfs.name.dir': this.getMultipleMountPointsConfig('NAMENODE', 'file'),
-      'dfs.namenode.name.dir': this.getMultipleMountPointsConfig('NAMENODE', 'file'),
-      'dfs.data.dir': this.getMultipleMountPointsConfig('DATANODE', 'file'),
-      'dfs.datanode.data.dir': this.getMultipleMountPointsConfig('DATANODE', 'file'),
-      'yarn.nodemanager.local-dirs': this.getMultipleMountPointsConfig('NODEMANAGER'),
-      'yarn.nodemanager.log-dirs': this.getMultipleMountPointsConfig('NODEMANAGER'),
-      'mapred.local.dir': this.getMultipleMountPointsConfig(['TASKTRACKER', 'NODEMANAGER']),
-      'log.dirs': this.getMultipleMountPointsConfig('KAFKA_BROKER'),
-
-      'fs.checkpoint.dir': this.getSingleMountPointConfig('SECONDARY_NAMENODE', 'file'),
-      'dfs.namenode.checkpoint.dir': this.getSingleMountPointConfig('SECONDARY_NAMENODE', 'file'),
-      'yarn.timeline-service.leveldb-timeline-store.path': this.getSingleMountPointConfig('APP_TIMELINE_SERVER'),
-      'yarn.timeline-service.leveldb-state-store.path': this.getSingleMountPointConfig('APP_TIMELINE_SERVER'),
-      'dataDir': this.getSingleMountPointConfig('ZOOKEEPER_SERVER'),
-      'oozie_data_dir': this.getSingleMountPointConfig('OOZIE_SERVER'),
-      'storm.local.dir': this.getSingleMountPointConfig(['NODEMANAGER', 'NIMBUS']),
-      '*.falcon.graph.storage.directory': this.getSingleMountPointConfig('FALCON_SERVER'),
-      '*.falcon.graph.serialize.path': this.getSingleMountPointConfig('FALCON_SERVER')
+      'instance.zookeeper.host': getZKBasedConfig()
     }
   }.property(''),
 
@@ -145,9 +125,7 @@ App.ConfigInitializer = App.ConfigInitializerClass.create(App.MountPointsBasedIn
   },
 
   initializerTypes: [
-    {name: 'zookeeper_based', method: '_initAsZookeeperServersList'},
-    {name: 'single_mountpoint', method: '_initAsSingleMountPoint'},
-    {name: 'multiple_mountpoints', method: '_initAsMultipleMountPoints'}
+    {name: 'zookeeper_based', method: '_initAsZookeeperServersList'}
   ],
 
   /**