You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by rn...@apache.org on 2015/06/05 00:28:18 UTC
ambari git commit: AMBARI-11701. Blueprint should not require
single-node properties for NameNodes in an HDFS NameNode HA cluster.
(rnettleton)
Repository: ambari
Updated Branches:
refs/heads/trunk ec196cff1 -> e92007d04
AMBARI-11701. Blueprint should not require single-node properties for NameNodes in an HDFS NameNode HA cluster. (rnettleton)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e92007d0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e92007d0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e92007d0
Branch: refs/heads/trunk
Commit: e92007d0467e52e4137086864c2ca03d6a5ac4cb
Parents: ec196cf
Author: Bob Nettleton <rn...@hortonworks.com>
Authored: Thu Jun 4 18:27:36 2015 -0400
Committer: Bob Nettleton <rn...@hortonworks.com>
Committed: Thu Jun 4 18:27:55 2015 -0400
----------------------------------------------------------------------
.../BlueprintConfigurationProcessor.java | 3 +-
.../BlueprintConfigurationProcessorTest.java | 80 ++++++++++++++++++++
2 files changed, 82 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/e92007d0/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index c6ff56c..ababc29 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -130,7 +130,8 @@ public class BlueprintConfigurationProcessor {
*/
private static final PropertyFilter[] clusterUpdatePropertyFilters =
{ new DependencyEqualsFilter("hbase.security.authorization", "hbase-site", "true"),
- new DependencyNotEqualsFilter("hive.server2.authentication", "hive-site", "NONE") };
+ new DependencyNotEqualsFilter("hive.server2.authentication", "hive-site", "NONE"),
+ new HDFSNameNodeHAFilter() };
/**
* Configuration properties to be updated
http://git-wip-us.apache.org/repos/asf/ambari/blob/e92007d0/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index d957836..83ed594 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -3954,6 +3954,13 @@ public class BlueprintConfigurationProcessorTest {
hdfsSiteProperties.put("dfs.secondary.http.address", "localhost:8080");
hdfsSiteProperties.put("dfs.namenode.secondary.http-address", "localhost:8080");
+
+ // add properties that are used in non-HA HDFS NameNode settings
+ // to verify that these are eventually removed by the filter
+ hdfsSiteProperties.put("dfs.namenode.http-address", "localhost:8080");
+ hdfsSiteProperties.put("dfs.namenode.https-address", "localhost:8081");
+ hdfsSiteProperties.put("dfs.namenode.rpc-address", "localhost:8082");
+
// configure the defaultFS to use the nameservice URL
coreSiteProperties.put("fs.defaultFS", "hdfs://" + expectedNameService);
@@ -4026,6 +4033,79 @@ public class BlueprintConfigurationProcessorTest {
assertEquals("instance.volumes should not be modified by cluster update when NameNode HA is enabled.",
"hdfs://" + expectedNameService + "/accumulo/test/instance/volumes", accumuloSiteProperties.get("instance.volumes"));
+ // verify that the non-HA properties are filtered out in HA mode
+ assertFalse("dfs.namenode.http-address should have been filtered out of this HA configuration",
+ hdfsSiteProperties.containsKey("dfs.namenode.http-address"));
+ assertFalse("dfs.namenode.https-address should have been filtered out of this HA configuration",
+ hdfsSiteProperties.containsKey("dfs.namenode.https-address"));
+ assertFalse("dfs.namenode.rpc-address should have been filtered out of this HA configuration",
+ hdfsSiteProperties.containsKey("dfs.namenode.rpc-address"));
+
+ }
+
+ @Test
+ public void testDoUpdateForClusterWithNameNodeHANotEnabled() throws Exception {
+ final String expectedHostName = "c6401.apache.ambari.org";
+ final String expectedHostNameTwo = "serverTwo";
+ final String expectedPortNum = "808080";
+ final String expectedHostGroupName = "host_group_1";
+
+ Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+
+ Map<String, String> hdfsSiteProperties = new HashMap<String, String>();
+ Map<String, String> hbaseSiteProperties = new HashMap<String, String>();
+ Map<String, String> hadoopEnvProperties = new HashMap<String, String>();
+ Map<String, String> coreSiteProperties = new HashMap<String, String>();
+ Map<String, String> accumuloSiteProperties = new HashMap<String, String>();
+
+ properties.put("hdfs-site", hdfsSiteProperties);
+ properties.put("hadoop-env", hadoopEnvProperties);
+ properties.put("core-site", coreSiteProperties);
+ properties.put("hbase-site", hbaseSiteProperties);
+ properties.put("accumulo-site", accumuloSiteProperties);
+
+ // add properties that require the SECONDARY_NAMENODE, which
+ // is included in this non-HA test cluster
+ hdfsSiteProperties.put("dfs.secondary.http.address", "localhost:8080");
+ hdfsSiteProperties.put("dfs.namenode.secondary.http-address", "localhost:8080");
+
+
+ // add properties that are used in non-HA HDFS NameNode settings
+ // to verify that these are retained (not filtered out) when NameNode HA is not enabled
+ hdfsSiteProperties.put("dfs.namenode.http-address", "localhost:8080");
+ hdfsSiteProperties.put("dfs.namenode.https-address", "localhost:8081");
+ hdfsSiteProperties.put("dfs.namenode.rpc-address", "localhost:8082");
+
+ Configuration clusterConfig = new Configuration(properties, Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
+ Collection<String> hgComponents = new HashSet<String>();
+ hgComponents.add("NAMENODE");
+ hgComponents.add("SECONDARY_NAMENODE");
+ TestHostGroup group1 = new TestHostGroup(expectedHostGroupName, hgComponents, Collections.singleton(expectedHostName));
+
+ Collection<String> hgComponents2 = new HashSet<String>();
+ TestHostGroup group2 = new TestHostGroup("host-group-2", hgComponents2, Collections.singleton(expectedHostNameTwo));
+
+ Collection<TestHostGroup> hostGroups = new ArrayList<TestHostGroup>();
+ hostGroups.add(group1);
+ hostGroups.add(group2);
+
+ expect(stack.getCardinality("NAMENODE")).andReturn(new Cardinality("1-2")).anyTimes();
+ expect(stack.getCardinality("SECONDARY_NAMENODE")).andReturn(new Cardinality("1")).anyTimes();
+
+ ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+ BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
+
+ updater.doUpdateForClusterCreate();
+
+ // verify that the non-HA properties are not filtered out in a non-HA cluster
+ assertTrue("dfs.namenode.http-address should have been included in this non-HA configuration",
+ hdfsSiteProperties.containsKey("dfs.namenode.http-address"));
+ assertTrue("dfs.namenode.https-address should have been included in this non-HA configuration",
+ hdfsSiteProperties.containsKey("dfs.namenode.https-address"));
+ assertTrue("dfs.namenode.rpc-address should have been included in this non-HA configuration",
+ hdfsSiteProperties.containsKey("dfs.namenode.rpc-address"));
+
}
@Test