Posted to commits@ambari.apache.org by ds...@apache.org on 2015/09/25 19:06:39 UTC

ambari git commit: AMBARI-13241 Post RU dfs_data_dir_mount.hist is lost (dsen)

Repository: ambari
Updated Branches:
  refs/heads/trunk ad31c9e8d -> 80e2f2036


AMBARI-13241 Post RU dfs_data_dir_mount.hist is lost (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/80e2f203
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/80e2f203
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/80e2f203

Branch: refs/heads/trunk
Commit: 80e2f2036539e5cf55924c61ab56599092c1987d
Parents: ad31c9e
Author: Dmytro Sen <ds...@apache.org>
Authored: Fri Sep 25 20:06:24 2015 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Fri Sep 25 20:06:24 2015 +0300

----------------------------------------------------------------------
 .../resource_management/TestDatanodeHelper.py   | 45 ++------------------
 .../libraries/functions/dfs_datanode_helper.py  | 42 ++++++------------
 .../2.1.0.2.0/package/scripts/hdfs_datanode.py  |  8 +++-
 .../python/stacks/2.0.6/HDFS/test_datanode.py   | 16 +++++++
 4 files changed, 40 insertions(+), 71 deletions(-)
----------------------------------------------------------------------
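
In short: handle_dfs_data_dir() previously wrote dfs_data_dir_mount.hist
itself with a bare open()/write(), outside of Ambari's resource framework,
so the file could be lost after a rolling upgrade (RU). With this patch the
helper returns the file content instead, and the caller persists it through
a File resource, which is re-applied with the correct owner, group and mode
every time the DataNode is configured. The sketch below illustrates the new
flow; the resource and helper names are taken from the patch, while the
wrapper function itself is hypothetical:

    from resource_management.core.resources.system import File
    from resource_management.libraries.functions.dfs_datanode_helper import \
      handle_dfs_data_dir

    def write_mount_history(params, create_dirs):
      # handle_dfs_data_dir() now *returns* the history-file content
      # instead of writing the file as a side effect.
      content = handle_dfs_data_dir(create_dirs, params)
      # Declaring the file as a File resource (this must run inside an
      # active resource_management Environment) makes Ambari own the file,
      # so it is recreated on every DataNode start instead of silently lost.
      File(params.data_dir_mount_file,
           owner=params.hdfs_user,
           group=params.user_group,
           mode=0644,
           content=content)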


http://git-wip-us.apache.org/repos/asf/ambari/blob/80e2f203/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py b/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
index a74cc0b..70539ac 100644
--- a/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
+++ b/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
@@ -65,8 +65,7 @@ class TestDatanodeHelper(TestCase):
 
   @patch.object(Logger, "info")
   @patch.object(Logger, "error")
-  @patch.object(dfs_datanode_helper, "_write_data_dir_to_mount_in_file")
-  def test_normalized(self, mock_write_data_dir_to_file, log_error, log_info):
+  def test_normalized(self, log_error, log_info):
     """
     Test that the data dirs are normalized by removing leading and trailing whitespace, and that the comparison is case-sensitive.
     """
@@ -88,41 +87,13 @@ class TestDatanodeHelper(TestCase):
 
     self.assertEquals(0, log_error.call_count)
 
-
-  @patch.object(Logger, "info")
-  @patch.object(Logger, "error")
-  @patch.object(dfs_datanode_helper, "_write_data_dir_to_mount_in_file")
-  @patch.object(dfs_datanode_helper, "get_mount_point_for_dir")
-  @patch.object(os.path, "isdir")
-  def test_save_mount_points(self, mock_os_isdir, mock_get_mount_point, mock_write_data_dir_to_mount_in_file, log_error, log_info):
-    """
-    Test when all mounts are on root.
-    """
-    mock_get_mount_point.side_effect = ["/", "/", "/"] * 2
-    mock_os_isdir.side_effect = [False, False, False] + [True, True, True]
-    mock_write_data_dir_to_mount_in_file.return_value = True
-
-    # Function under test
-    dfs_datanode_helper.handle_dfs_data_dir(fake_create_dir, self.params, update_cache=False)
-
-    for (name, args, kwargs) in log_info.mock_calls:
-      print args[0]
-
-    for (name, args, kwargs) in log_error.mock_calls:
-      print args[0]
-
-    self.assertEquals(0, log_error.call_count)
-    mock_write_data_dir_to_mount_in_file.assert_called_once_with(self.params, {self.grid0: "/", self.grid1: "/", self.grid2: "/"})
-
-
   @patch.object(Logger, "info")
   @patch.object(Logger, "error")
   @patch.object(dfs_datanode_helper, "get_data_dir_to_mount_from_file")
-  @patch.object(dfs_datanode_helper, "_write_data_dir_to_mount_in_file")
   @patch.object(dfs_datanode_helper, "get_mount_point_for_dir")
   @patch.object(os.path, "isdir")
   @patch.object(os.path, "exists")
-  def test_grid_becomes_unmounted(self, mock_os_exists, mock_os_isdir, mock_get_mount_point, mock_write_data_dir_to_mount_in_file, mock_get_data_dir_to_mount_from_file, log_error, log_info):
+  def test_grid_becomes_unmounted(self, mock_os_exists, mock_os_isdir, mock_get_mount_point, mock_get_data_dir_to_mount_from_file, log_error, log_info):
     """
     Test when grid2 becomes unmounted
     """
@@ -134,7 +105,6 @@ class TestDatanodeHelper(TestCase):
     # Grid2 then becomes unmounted
     mock_get_mount_point.side_effect = ["/dev0", "/dev1", "/"] * 2
     mock_os_isdir.side_effect = [False, False, False] + [True, True, True]
-    mock_write_data_dir_to_mount_in_file.return_value = True
 
     # Function under test
     dfs_datanode_helper.handle_dfs_data_dir(fake_create_dir, self.params, update_cache=False)
@@ -151,18 +121,13 @@ class TestDatanodeHelper(TestCase):
     self.assertEquals(1, log_error.call_count)
     self.assertTrue("Directory /grid/2/data does not exist and became unmounted from /dev2" in error_msg)
 
-    # Notice that grid2 is still written with its original mount point because an error occurred on it
-    mock_write_data_dir_to_mount_in_file.assert_called_once_with(self.params, {self.grid0: "/dev0", self.grid1: "/dev1", self.grid2: "/dev2"})
-
-
   @patch.object(Logger, "info")
   @patch.object(Logger, "error")
   @patch.object(dfs_datanode_helper, "get_data_dir_to_mount_from_file")
-  @patch.object(dfs_datanode_helper, "_write_data_dir_to_mount_in_file")
   @patch.object(dfs_datanode_helper, "get_mount_point_for_dir")
   @patch.object(os.path, "isdir")
   @patch.object(os.path, "exists")
-  def test_grid_becomes_remounted(self, mock_os_exists, mock_os_isdir, mock_get_mount_point, mock_write_data_dir_to_mount_in_file, mock_get_data_dir_to_mount_from_file, log_error, log_info):
+  def test_grid_becomes_remounted(self, mock_os_exists, mock_os_isdir, mock_get_mount_point, mock_get_data_dir_to_mount_from_file, log_error, log_info):
     """
     Test when grid2 becomes remounted
     """
@@ -174,7 +139,6 @@ class TestDatanodeHelper(TestCase):
     # Grid2 then becomes remounted
     mock_get_mount_point.side_effect = ["/dev0", "/dev1", "/dev2"] * 2
     mock_os_isdir.side_effect = [False, False, False] + [True, True, True]
-    mock_write_data_dir_to_mount_in_file.return_value = True
 
     # Function under test
     dfs_datanode_helper.handle_dfs_data_dir(fake_create_dir, self.params, update_cache=False)
@@ -186,6 +150,3 @@ class TestDatanodeHelper(TestCase):
       print args[0]
 
     self.assertEquals(0, log_error.call_count)
-
-    # Notice that grid2 is now written with its new mount point to prevent a regression
-    mock_write_data_dir_to_mount_in_file.assert_called_once_with(self.params, {self.grid0: "/dev0", self.grid1: "/dev1", self.grid2: "/dev2"})
\ No newline at end of file
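
For readers skimming the test changes: these tests drive the helper with
canned values through mock's side_effect, where each call consumes one list
element, so ["/dev0", "/dev1", "/"] * 2 covers two passes over the three
grid dirs. A self-contained illustration of that pattern, independent of
Ambari:

    from mock import MagicMock

    get_mount = MagicMock(side_effect=["/dev0", "/dev1", "/"] * 2)
    # First pass over the three data dirs:
    assert [get_mount() for _ in range(3)] == ["/dev0", "/dev1", "/"]
    # A second pass consumes the remaining canned values:
    assert [get_mount() for _ in range(3)] == ["/dev0", "/dev1", "/"]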

http://git-wip-us.apache.org/repos/asf/ambari/blob/80e2f203/ambari-common/src/main/python/resource_management/libraries/functions/dfs_datanode_helper.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/dfs_datanode_helper.py b/ambari-common/src/main/python/resource_management/libraries/functions/dfs_datanode_helper.py
index 778d869..33e7b41 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/dfs_datanode_helper.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/dfs_datanode_helper.py
@@ -25,31 +25,14 @@ import os
 from resource_management.libraries.functions.file_system import get_mount_point_for_dir, get_and_cache_mount_points
 from resource_management.core.logger import Logger
 
-
-def _write_data_dir_to_mount_in_file(params, new_data_dir_to_mount_point):
-  """
-  :param new_data_dir_to_mount_point: Dictionary to write to the data_dir_mount_file file, where
-  the key is each DFS data dir, and the value is its current mount point.
-  :return: Returns True on success, False otherwise.
-  """
-  # Overwrite the existing file, or create it if doesn't exist
-  if params.data_dir_mount_file:
-    try:
-      with open(str(params.data_dir_mount_file), "w") as f:
-        f.write("# This file keeps track of the last known mount-point for each DFS data dir.\n")
-        f.write("# It is safe to delete, since it will get regenerated the next time that the DataNode starts.\n")
-        f.write("# However, it is not advised to delete this file since Ambari may \n")
-        f.write("# re-create a DFS data dir that used to be mounted on a drive but is now mounted on the root.\n")
-        f.write("# Comments begin with a hash (#) symbol\n")
-        f.write("# data_dir,mount_point\n")
-        for kv in new_data_dir_to_mount_point.iteritems():
-          f.write(kv[0] + "," + kv[1] + "\n")
-    except Exception, e:
-      Logger.error("Encountered error while attempting to save DFS data dir mount values to file %s" %
-                   str(params.data_dir_mount_file))
-      return False
-  return True
-
+DATA_DIR_TO_MOUNT_HEADER = """
+# This file keeps track of the last known mount-point for each DFS data dir.
+# It is safe to delete, since it will get regenerated the next time that the DataNode starts.
+# However, it is not advised to delete this file since Ambari may
+# re-create a DFS data dir that used to be mounted on a drive but is now mounted on the root.
+# Comments begin with a hash (#) symbol
+# data_dir,mount_point
+"""
 
 def get_data_dir_to_mount_from_file(params):
   """
@@ -96,6 +79,7 @@ def handle_dfs_data_dir(func, params, update_cache=True):
                will be called as func(data_dir, params)
   :param params: parameters to pass to function pointer
   :param update_cache: Bool indicating whether to update the global cache of mount points
+  :return: Returns the new content for the data_dir_mount_file
   """
 
   # Get the data dirs that Ambari knows about and their last known mount point
@@ -172,9 +156,6 @@ def handle_dfs_data_dir(func, params, update_cache=True):
       curr_mount_point = get_mount_point_for_dir(data_dir)
       data_dir_to_mount_point[data_dir] = curr_mount_point
 
-  # Save back to the file
-  _write_data_dir_to_mount_in_file(params, data_dir_to_mount_point)
-
   if error_messages and len(error_messages) > 0:
     header = " ERROR ".join(["*****"] * 6)
     header = "\n" + "\n".join([header, ] * 3) + "\n"
@@ -183,4 +164,9 @@ def handle_dfs_data_dir(func, params, update_cache=True):
           "root partition, either update the contents of {0}, or delete that file.".format(params.data_dir_mount_file)
     Logger.error(header + msg + header)
 
+  data_dir_to_mount = DATA_DIR_TO_MOUNT_HEADER
+  for kv in data_dir_to_mount_point.iteritems():
+    data_dir_to_mount += kv[0] + "," + kv[1] + "\n"
+
+  return data_dir_to_mount
 
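For reference, the returned string is the header above plus one
data_dir,mount_point row per tracked dir, so a rendered
dfs_data_dir_mount.hist could look like this (the paths are illustrative,
matching the values used in the tests):

    # This file keeps track of the last known mount-point for each DFS data dir.
    # It is safe to delete, since it will get regenerated the next time that the DataNode starts.
    # However, it is not advised to delete this file since Ambari may
    # re-create a DFS data dir that used to be mounted on a drive but is now mounted on the root.
    # Comments begin with a hash (#) symbol
    # data_dir,mount_point
    /grid/0/data,/dev0
    /grid/1/data,/dev1
    /grid/2/data,/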

http://git-wip-us.apache.org/repos/asf/ambari/blob/80e2f203/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
index df847bd..34ec8cd 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
@@ -48,7 +48,13 @@ def datanode(action=None):
               owner=params.hdfs_user,
               group=params.user_group)
 
-    handle_dfs_data_dir(create_dirs, params)
+    File(params.data_dir_mount_file,
+         owner=params.hdfs_user,
+         group=params.user_group,
+         mode=0644,
+         content=handle_dfs_data_dir(create_dirs, params)
+    )
+
   elif action == "start" or action == "stop":
     import params
     service(

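On the read side, get_data_dir_to_mount_from_file(), which this patch leaves
unchanged, parses those pairs back the next time the DataNode starts. A
hypothetical, simplified equivalent of that parsing, for illustration only:

    def parse_data_dir_mount_file(path):
      # Hypothetical re-implementation of the read side: skip comment and
      # blank lines, split each remaining line into (data_dir, mount_point).
      mapping = {}
      with open(path) as f:
        for line in f:
          line = line.strip()
          if not line or line.startswith("#"):
            continue
          data_dir, mount_point = line.split(",", 1)
          mapping[data_dir] = mount_point
      return mapping
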
http://git-wip-us.apache.org/repos/asf/ambari/blob/80e2f203/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index 8e6e386..72925a0 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -23,6 +23,8 @@ from mock.mock import MagicMock, patch
 from resource_management.libraries.script import Script
 from resource_management.core import shell
 from resource_management.core.exceptions import Fail
+import resource_management.libraries.functions.dfs_datanode_helper
+
 
 class TestDatanode(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
@@ -355,6 +357,13 @@ class TestDatanode(RMFTestCase):
                               recursive = True,
                               cd_access='a'
                               )
+    content = resource_management.libraries.functions.dfs_datanode_helper.DATA_DIR_TO_MOUNT_HEADER
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs_data_dir_mount.hist',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0644,
+                              content = content
+                              )
 
   def assert_configure_secured(self, stackVersion=STACK_VERSION, snappy_enabled=True):
     conf_dir = '/etc/hadoop/conf'
@@ -420,6 +429,13 @@ class TestDatanode(RMFTestCase):
                               recursive = True,
                               cd_access='a'
                               )
+    content = resource_management.libraries.functions.dfs_datanode_helper.DATA_DIR_TO_MOUNT_HEADER
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs_data_dir_mount.hist',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0644,
+                              content = content
+                              )
 
 
   def test_pre_rolling_restart(self):