You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by ao...@apache.org on 2016/03/18 13:59:03 UTC

[2/2] ambari git commit: AMBARI-15468. file_system get_mount_point_for_dir works incorrectly (aonishuk)

AMBARI-15468. file_system get_mount_point_for_dir works incorrectly (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/dd846600
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/dd846600
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/dd846600

Branch: refs/heads/branch-2.2
Commit: dd846600ffb4cb34228da0120cfd10fb20026f14
Parents: fa9bed0
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Fri Mar 18 14:58:38 2016 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Fri Mar 18 14:58:38 2016 +0200

----------------------------------------------------------------------
 .../resource_management/TestFileSystem.py       | 47 +++++++++++++++++++-
 .../libraries/functions/file_system.py          |  6 ++-
 .../stacks/HDP/2.0.6/services/stack_advisor.py  |  6 ++-
 3 files changed, 54 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/dd846600/ambari-agent/src/test/python/resource_management/TestFileSystem.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/resource_management/TestFileSystem.py b/ambari-agent/src/test/python/resource_management/TestFileSystem.py
index 4e0eb63..925758c 100644
--- a/ambari-agent/src/test/python/resource_management/TestFileSystem.py
+++ b/ambari-agent/src/test/python/resource_management/TestFileSystem.py
@@ -34,6 +34,8 @@ class TestFileSystem(TestCase):
     SINGLE_ROOT = 1
     MULT_DRIVE_CONFLICT = 2
     MULT_DRIVE_DISTINCT = 3
+    ONE_SEGMENT_MOUNT = 4
+    SAME_PREFIX_MOUNTS = 5
 
   def _get_mount(self, type):
     """
@@ -64,6 +66,13 @@ class TestFileSystem(TestCase):
       out += os.linesep + \
              "/dev/sda1 on /hadoop/hdfs/data/1 type ext4 (rw)" + os.linesep + \
              "/dev/sda2 on /hadoop/hdfs/data/2 type ext4 (rw)"
+    elif type == self.MOUNT_TYPE.ONE_SEGMENT_MOUNT:
+      out += os.linesep + \
+             "/dev/sda1 on /hadoop type ext4 (rw)"
+    elif type == self.MOUNT_TYPE.SAME_PREFIX_MOUNTS:
+      out += os.linesep + \
+             "/dev/sda1 on /hadoop/hdfs/data type ext4 (rw)" + os.linesep + \
+             "/dev/sda2 on /hadoop/hdfs/data1 type ext4 (rw)"
 
     out_array = [x.split(' ') for x in out.strip().split('\n')]
     mount_val = []
@@ -103,6 +112,9 @@ class TestFileSystem(TestCase):
     """
     mounted_mock.return_value = self._get_mount(self.MOUNT_TYPE.SINGLE_ROOT)
 
+    # refresh cached mounts
+    file_system.get_and_cache_mount_points(True)
+
     mount_point = file_system.get_mount_point_for_dir("/hadoop/hdfs/data")
     self.assertEqual(mount_point, "/")
 
@@ -115,8 +127,41 @@ class TestFileSystem(TestCase):
     """
     mounted_mock.return_value = self._get_mount(self.MOUNT_TYPE.MULT_DRIVE_DISTINCT)
 
+    # refresh cached mounts
+    file_system.get_and_cache_mount_points(True)
+
     mount_point = file_system.get_mount_point_for_dir("/hadoop/hdfs/data/1")
     self.assertEqual(mount_point, "/hadoop/hdfs/data/1")
 
     mount_point = file_system.get_mount_point_for_dir("/hadoop/hdfs/data/2")
-    self.assertEqual(mount_point, "/hadoop/hdfs/data/2")
\ No newline at end of file
+    self.assertEqual(mount_point, "/hadoop/hdfs/data/2")
+
+  @patch.object(Logger, "info")
+  @patch.object(Logger, "error")
+  @patch('resource_management.core.providers.mount.get_mounted')
+  def test_one_segment_mount(self, mounted_mock, log_error, log_info):
+    """
+    Testing when the path has one segment.
+    """
+    mounted_mock.return_value = self._get_mount(self.MOUNT_TYPE.ONE_SEGMENT_MOUNT)
+
+    # refresh cached mounts
+    file_system.get_and_cache_mount_points(True)
+
+    mount_point = file_system.get_mount_point_for_dir("/hadoop/hdfs/data/1")
+    self.assertEqual(mount_point, "/hadoop")
+
+  @patch.object(Logger, "info")
+  @patch.object(Logger, "error")
+  @patch('resource_management.core.providers.mount.get_mounted')
+  def test_same_prefix(self, mounted_mock, log_error, log_info):
+    """
+    Testing when two mount points have the same prefix.
+    """
+    mounted_mock.return_value = self._get_mount(self.MOUNT_TYPE.SAME_PREFIX_MOUNTS)
+
+    # refresh cached mounts
+    file_system.get_and_cache_mount_points(True)
+
+    mount_point = file_system.get_mount_point_for_dir("/hadoop/hdfs/data1")
+    self.assertEqual(mount_point, "/hadoop/hdfs/data1")

http://git-wip-us.apache.org/repos/asf/ambari/blob/dd846600/ambari-common/src/main/python/resource_management/libraries/functions/file_system.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/file_system.py b/ambari-common/src/main/python/resource_management/libraries/functions/file_system.py
index 39b86dd..2a859ed 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/file_system.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/file_system.py
@@ -63,10 +63,12 @@ def get_mount_point_for_dir(dir):
     # "/", "/hadoop/hdfs", and "/hadoop/hdfs/data".
     # So take the one with the greatest number of segments.
     for m in cached_mounts:
-      if dir.startswith(m['mount_point']):
+      # Ensure that the mount path and the dir path end with "/"
+      # The mount point "/hadoop" should not match the path "/hadoop1"
+      if os.path.join(dir, "").startswith(os.path.join(m['mount_point'], "")):
         if best_mount_found is None:
           best_mount_found = m["mount_point"]
-        elif best_mount_found.count(os.path.sep) < os.path.join(m["mount_point"]).count(os.path.sep):
+        elif os.path.join(best_mount_found, "").count(os.path.sep) < os.path.join(m["mount_point"], "").count(os.path.sep):
           best_mount_found = m["mount_point"]
 
   Logger.info("Mount point for directory %s is %s" % (str(dir), str(best_mount_found)))

http://git-wip-us.apache.org/repos/asf/ambari/blob/dd846600/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index ee2007f..bb1a132 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -1549,10 +1549,12 @@ def getMountPointForDir(dir, mountPoints):
     # "/", "/hadoop/hdfs", and "/hadoop/hdfs/data".
     # So take the one with the greatest number of segments.
     for mountPoint in mountPoints:
-      if dir.startswith(mountPoint):
+      # Ensure that the mount path and the dir path end with "/"
+      # The mount point "/hadoop" should not match the path "/hadoop1"
+      if os.path.join(dir, "").startswith(os.path.join(mountPoint, "")):
         if bestMountFound is None:
           bestMountFound = mountPoint
-        elif bestMountFound.count(os.path.sep) < os.path.join(mountPoint, "").count(os.path.sep):
+        elif os.path.join(bestMountFound, "").count(os.path.sep) < os.path.join(mountPoint, "").count(os.path.sep):
           bestMountFound = mountPoint
 
   return bestMountFound