You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by ds...@apache.org on 2016/04/14 16:33:55 UTC
ambari git commit: AMBARI-15788 Ambari upgrade should auto-populate
dfs.internal.nameservice (smohanty via dsen)
Repository: ambari
Updated Branches:
refs/heads/trunk 081b48d41 -> 6d081b19a
AMBARI-15788 Ambari upgrade should auto-populate dfs.internal.nameservice (smohanty via dsen)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6d081b19
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6d081b19
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6d081b19
Branch: refs/heads/trunk
Commit: 6d081b19a5637e4f5682daf1dcc27f6b1a5e8b60
Parents: 081b48d
Author: Dmytro Sen <ds...@apache.org>
Authored: Thu Apr 14 17:33:44 2016 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Thu Apr 14 17:33:44 2016 +0300
----------------------------------------------------------------------
.../server/upgrade/UpgradeCatalog240.java | 7 ++
.../server/upgrade/UpgradeCatalog240Test.java | 125 +++++++++++++++++++
2 files changed, 132 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/6d081b19/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
index daec571..2878b0e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
@@ -1038,6 +1038,8 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
* In hdfs-site, set dfs.client.retry.policy.enabled=false
* This is needed for Rolling/Express upgrade so that clients don't keep retrying, which exhausts the retries and
* doesn't allow for a graceful failover, which is expected.
+ *
+ * Rely on dfs.internal.nameservices after upgrade. Copy the value from dfs.nameservices
* @throws AmbariException
*/
protected void updateHDFSConfigs() throws AmbariException {
@@ -1058,6 +1060,11 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
if (null != clientRetryPolicyEnabled && Boolean.parseBoolean(clientRetryPolicyEnabled)) {
updateConfigurationProperties("hdfs-site", Collections.singletonMap("dfs.client.retry.policy.enabled", "false"), true, false);
}
+ String nameservices = hdfsSite.getProperties().get("dfs.nameservices");
+ String int_nameservices = hdfsSite.getProperties().get("dfs.internal.nameservices");
+ if(int_nameservices == null && nameservices != null) {
+ updateConfigurationProperties("hdfs-site", Collections.singletonMap("dfs.internal.nameservices", nameservices), true, false);
+ }
}
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/6d081b19/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
index 90024a1..73bfa74 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
@@ -68,6 +68,7 @@ import org.apache.ambari.server.state.AlertFirmness;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.stack.OsFamily;
import org.easymock.Capture;
import org.easymock.CaptureType;
@@ -443,6 +444,130 @@ public class UpgradeCatalog240Test {
Assert.assertEquals(result, expectedSource);
}
+ /**
+ * Test that dfs.internal.nameservices is not added when dfs.nameservices is absent.
+ * Also verifies that dfs.client.retry.policy.enabled is reset to false.
+ * @throws Exception
+ */
+ @Test
+ public void testHdfsSiteUpdateConfigs2() throws Exception{
+ Map<String, String> oldPropertiesHdfsSite = new HashMap<String, String>() {
+ {
+ put("dfs.client.retry.policy.enabled", "true");
+ }
+ };
+ Map<String, String> newPropertiesHdfsSite = new HashMap<String, String>() {
+ {
+ put("dfs.client.retry.policy.enabled", "false");
+ }
+ };
+ EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+ Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
+ final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
+ final Service service = createStrictMock(Service.class);
+ final Map<String, Service> services = Collections.singletonMap("HDFS", service);
+ Config mockHdfsSite = easyMockSupport.createNiceMock(Config.class);
+
+ expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+ put("normal", cluster);
+ }}).anyTimes();
+ expect(cluster.getDesiredConfigByType("hdfs-site")).andReturn(mockHdfsSite).atLeastOnce();
+ expect(mockHdfsSite.getProperties()).andReturn(oldPropertiesHdfsSite).anyTimes();
+ expect(cluster.getServices()).andReturn(services).once();
+
+ Injector injector = easyMockSupport.createNiceMock(Injector.class);
+ expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
+ expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
+ expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
+
+ replay(injector, clusters, mockHdfsSite, cluster);
+
+ AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
+ .addMockedMethod("createConfiguration")
+ .addMockedMethod("getClusters", new Class[] { })
+ .addMockedMethod("createConfig")
+ .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
+ .createNiceMock();
+
+ Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
+ Capture<Map> propertiesCapture = EasyMock.newCapture();
+
+ expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
+ expect(controller.getClusters()).andReturn(clusters).anyTimes();
+ expect(controller.createConfig(anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ anyObject(Map.class))).andReturn(createNiceMock(Config.class)).once();
+
+ replay(controller, injector2);
+ new UpgradeCatalog240(injector2).updateHDFSConfigs();
+ easyMockSupport.verifyAll();
+
+ Map<String, String> updatedProperties = propertiesCapture.getValue();
+ assertTrue(Maps.difference(newPropertiesHdfsSite, updatedProperties).areEqual());
+ }
+
+ /**
+ * Test that dfs.nameservices is copied over to dfs.internal.nameservices
+ * @throws Exception
+ */
+ @Test
+ public void testHdfsSiteUpdateConfigs() throws Exception{
+ Map<String, String> oldPropertiesHdfsSite = new HashMap<String, String>() {
+ {
+ put("dfs.nameservices", "nnha");
+ }
+ };
+ Map<String, String> newPropertiesHdfsSite = new HashMap<String, String>() {
+ {
+ put("dfs.nameservices", "nnha");
+ put("dfs.internal.nameservices", "nnha");
+ }
+ };
+ EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+ Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
+ final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
+ final Service service = createStrictMock(Service.class);
+ final Map<String, Service> services = Collections.singletonMap("HDFS", service);
+ Config mockHdfsSite = easyMockSupport.createNiceMock(Config.class);
+
+ expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+ put("normal", cluster);
+ }}).anyTimes();
+ expect(cluster.getDesiredConfigByType("hdfs-site")).andReturn(mockHdfsSite).atLeastOnce();
+ expect(mockHdfsSite.getProperties()).andReturn(oldPropertiesHdfsSite).anyTimes();
+ expect(cluster.getServices()).andReturn(services).once();
+
+ Injector injector = easyMockSupport.createNiceMock(Injector.class);
+ expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
+ expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
+ expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
+
+ replay(injector, clusters, mockHdfsSite, cluster);
+
+ AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
+ .addMockedMethod("createConfiguration")
+ .addMockedMethod("getClusters", new Class[] { })
+ .addMockedMethod("createConfig")
+ .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
+ .createNiceMock();
+
+ Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
+ Capture<Map> propertiesCapture = EasyMock.newCapture();
+
+ expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
+ expect(controller.getClusters()).andReturn(clusters).anyTimes();
+ expect(controller.createConfig(anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ anyObject(Map.class))).andReturn(createNiceMock(Config.class)).once();
+
+ replay(controller, injector2);
+ new UpgradeCatalog240(injector2).updateHDFSConfigs();
+ easyMockSupport.verifyAll();
+
+ Map<String, String> updatedProperties = propertiesCapture.getValue();
+ assertTrue(Maps.difference(newPropertiesHdfsSite, updatedProperties).areEqual());
+ }
+
@Test
public void testAmsHbaseEnvUpdateConfigs() throws Exception{