Posted to commits@ambari.apache.org by ma...@apache.org on 2014/05/22 19:46:38 UTC

git commit: AMBARI-5840. Ambari recommission of datanodes will not work if the active namenode is nn2. (Jonathan Hurley via mahadev)

Repository: ambari
Updated Branches:
  refs/heads/trunk db7c6f50c -> 4c45504be


AMBARI-5840. Ambari recommission of datanodes will not work if the active namenode is nn2. (Jonathan Hurley via mahadev)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4c45504b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4c45504b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4c45504b

Branch: refs/heads/trunk
Commit: 4c45504be3347974f18114de89eb5891f4afd98e
Parents: db7c6f5
Author: Mahadev Konar <ma...@apache.org>
Authored: Thu May 22 10:46:31 2014 -0700
Committer: Mahadev Konar <ma...@apache.org>
Committed: Thu May 22 10:46:31 2014 -0700

----------------------------------------------------------------------
 .../HDFS/package/scripts/hdfs_namenode.py         |  4 +++-
 .../2.0.6/services/HDFS/package/scripts/params.py |  4 ++++
 .../python/stacks/2.0.6/HDFS/test_namenode.py     | 18 ++++++++++++++++++
 3 files changed, 25 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4c45504b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
index 0086b13..67e16ed 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
@@ -145,7 +145,9 @@ def decommission():
           user=hdfs_user
   )
 
-  ExecuteHadoop('dfsadmin -refreshNodes',
+  # due to a bug in HDFS, refreshNodes will not run on both NameNodes, so we
+  # need to execute each command scoped to a particular NameNode
+  ExecuteHadoop(format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes'),
                 user=hdfs_user,
                 conf_dir=conf_dir,
                 kinit_override=True)
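
For context, the fix pins the dfsadmin call to a specific NameNode RPC address instead of relying on the default filesystem URI, which HDFS may resolve against only one of the two NameNodes. Below is a minimal standalone sketch of how the scoped command string ends up looking; plain str.format() stands in for Ambari's format() (which resolves names from the caller's scope), and the host/port value is a hypothetical placeholder:

    # Minimal sketch only: str.format() stands in for Ambari's format(), and
    # the RPC address below is a hypothetical placeholder.
    namenode_rpc = "c6401.ambari.apache.org:8020"  # rpc-address of this NameNode

    command = "dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes".format(
        namenode_rpc=namenode_rpc)

    print(command)
    # dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -refreshNodes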

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c45504b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
index a6fea72..7a4c667 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
@@ -134,7 +134,10 @@ dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
 dfs_ha_enabled = False
 dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
 dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+
 namenode_id = None
+namenode_rpc = None
+
 if dfs_ha_namenode_ids:
   dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
   dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
@@ -145,6 +148,7 @@ if dfs_ha_enabled:
     nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
     if hostname in nn_host:
       namenode_id = nn_id
+      namenode_rpc = nn_host
 
 journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
 if journalnode_address:
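
The params.py hunk above records the RPC address of whichever NameNode the script happens to be running on: it walks the configured NameNode IDs for the nameservice, looks up each one's rpc-address, and keeps the entry whose host matches the local hostname. A standalone sketch of that lookup, using a plain dict in place of Ambari's config object and hypothetical hostnames:

    # Standalone sketch of the HA lookup above, with a plain dict standing in
    # for Ambari's config object; hostnames are hypothetical.
    hdfs_site = {
        'dfs.nameservices': 'ns1',
        'dfs.ha.namenodes.ns1': 'nn1,nn2',
        'dfs.namenode.rpc-address.ns1.nn1': 'c6401.ambari.apache.org:8020',
        'dfs.namenode.rpc-address.ns1.nn2': 'c6402.ambari.apache.org:8020',
    }
    hostname = 'c6402.ambari.apache.org'  # the host this script is running on

    nameservice = hdfs_site['dfs.nameservices']
    namenode_id = None
    namenode_rpc = None
    for nn_id in hdfs_site['dfs.ha.namenodes.%s' % nameservice].split(','):
        nn_host = hdfs_site['dfs.namenode.rpc-address.%s.%s' % (nameservice, nn_id)]
        if hostname in nn_host:  # substring match, as in params.py
            namenode_id = nn_id      # 'nn2'
            namenode_rpc = nn_host   # 'c6402.ambari.apache.org:8020'

    print(namenode_rpc)  # c6402.ambari.apache.org:8020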

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c45504b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index 54d2175..fa70469 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -370,6 +370,24 @@ class TestNamenode(RMFTestCase):
                               only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'",
                               )
     self.assertNoMoreResources()
+    
+  def test_decommission_ha(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "decommission",
+                       config_file="ha_default.json"
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+                              owner = 'hdfs',
+                              content = Template('exclude_hosts_list.j2'),
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('Execute', '', user = 'hdfs')
+    self.assertResourceCalled('ExecuteHadoop', 'dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -refreshNodes', 
+                              user = 'hdfs', 
+                              conf_dir = '/etc/hadoop/conf', 
+                              kinit_override = True)
+    self.assertNoMoreResources()    
 
   def assert_configure_default(self):
     self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
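
For readers unfamiliar with Ambari's RMFTestCase style used in the new test: executeScript runs the stack script against a canned JSON configuration (here ha_default.json) and records every resource the script would create, and assertResourceCalled then pops them off in order. A rough standalone analogue of that pattern, reduced to the key refreshNodes assertion; the recorder below is hypothetical and is not Ambari's actual test framework:

    # Hypothetical recorder, not Ambari's framework: resources are recorded in
    # order during the run, then popped and compared in order.
    recorded = [
        ('ExecuteHadoop',
         'dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -refreshNodes',
         {'user': 'hdfs', 'conf_dir': '/etc/hadoop/conf', 'kinit_override': True}),
    ]

    def assert_resource_called(res_type, name, **kwargs):
        # pop the next recorded resource and compare type, name, and attributes
        actual = recorded.pop(0)
        assert actual == (res_type, name, kwargs), actual

    assert_resource_called(
        'ExecuteHadoop',
        'dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -refreshNodes',
        user='hdfs', conf_dir='/etc/hadoop/conf', kinit_override=True)

    assert not recorded  # analogue of assertNoMoreResources()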