You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by ds...@apache.org on 2018/08/30 14:49:22 UTC
[ambari] branch trunk updated: [AMBARI-24458] Start operation is
disabled for HDFS after stopping components for one namespace (dsen)
(#2152)
This is an automated email from the ASF dual-hosted git repository.
dsen pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git
The following commit(s) were added to refs/heads/trunk by this push:
new c4f79dd [AMBARI-24458] Start operation is disabled for HDFS after stopping components for one namespace (dsen) (#2152)
c4f79dd is described below
commit c4f79dd0d678847f1a5ed9ea4b9a9d2b6dd72331
Author: Dmitry Sen <ds...@apache.org>
AuthorDate: Thu Aug 30 17:49:20 2018 +0300
[AMBARI-24458] Start operation is disabled for HDFS after stopping components for one namespace (dsen) (#2152)
---
.../state/HDFSServiceCalculatedState.java | 43 +++++++++--
.../state/HDFSServiceCalculatedStateTest.java | 85 ++++++++++++++++++++++
2 files changed, 123 insertions(+), 5 deletions(-)
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedState.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedState.java
index ad96801..108decc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedState.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedState.java
@@ -18,7 +18,10 @@
package org.apache.ambari.server.controller.utilities.state;
+import java.util.ArrayList;
import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
import java.util.Set;
import org.apache.ambari.server.AmbariException;
@@ -27,8 +30,10 @@ import org.apache.ambari.server.StaticallyInject;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.controller.ServiceComponentHostRequest;
import org.apache.ambari.server.controller.ServiceComponentHostResponse;
+import org.apache.ambari.server.stack.NameService;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.State;
@@ -59,8 +64,10 @@ public final class HDFSServiceCalculatedState extends DefaultServiceCalculatedSt
Set<ServiceComponentHostResponse> hostComponentResponses =
managementControllerProvider.get().getHostComponents(Collections.singleton(request), true);
+ Set<String> startedOrDisabledNNHosts = new HashSet<>();
+
int nameNodeCount = 0;
- int nameNodeActiveCount = 0;
+ int nameNodeStartedOrDisabledCount = 0;
boolean hasSecondary = false;
boolean hasJournal = false;
State nonStartedState = null;
@@ -94,7 +101,8 @@ public final class HDFSServiceCalculatedState extends DefaultServiceCalculatedSt
case STARTED:
case DISABLED:
if (isNameNode) {
- ++nameNodeActiveCount;
+ ++nameNodeStartedOrDisabledCount;
+ startedOrDisabledNNHosts.add(hostComponentResponse.getHostname());
}
break;
default:
@@ -106,9 +114,34 @@ public final class HDFSServiceCalculatedState extends DefaultServiceCalculatedSt
}
}
- if ( nonStartedState == null || // all started
- ((nameNodeCount > 0 && !hasSecondary || hasJournal) &&
- nameNodeActiveCount > 0)) { // at least one active namenode
+ boolean multipleNameServices = nameNodeCount > 2;
+ int nameServiceWithStartedOrDisabledNNCount = 0;
+ List<NameService> nameServices = new ArrayList<>();
+
+ // count name services that have at least one namenode in STARTED or DISABLED state
+ if (multipleNameServices) {
+ ConfigHelper configHelper = managementControllerProvider.get().getConfigHelper();
+ nameServices = NameService.fromConfig(configHelper, cluster);
+
+ for (NameService nameService : nameServices) {
+ boolean hasStartedOrDisabledNN = false;
+ for (NameService.NameNode nameNode : nameService.getNameNodes()) {
+ if (startedOrDisabledNNHosts.contains(nameNode.getHost())) {
+ hasStartedOrDisabledNN = true;
+ break;
+ }
+ }
+ if (hasStartedOrDisabledNN) {
+ nameServiceWithStartedOrDisabledNNCount++;
+ }
+ }
+ }
+
+ // STARTED when: all components started, OR (at least one started/disabled namenode AND, with multiple namespaces, at least one started/disabled namenode per namespace)
+ if (nonStartedState == null || // all started
+ ((nameNodeCount > 0 && !hasSecondary || hasJournal) &&
+ nameNodeStartedOrDisabledCount > 0 &&
+ (!multipleNameServices || nameServiceWithStartedOrDisabledNNCount == nameServices.size()))) {
return State.STARTED;
}
return nonStartedState;
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java
index cfae64f..c7d7811 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java
@@ -19,15 +19,22 @@
package org.apache.ambari.server.controller.utilities.state;
+import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.HostState;
import org.apache.ambari.server.state.ServiceComponent;
import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.State;
import org.junit.Assert;
+import org.junit.Test;
public class HDFSServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest{
@Override
@@ -86,4 +93,82 @@ public class HDFSServiceCalculatedStateTest extends GeneralServiceCalculatedStat
State state = serviceCalculatedState.getState(clusterName, getServiceName());
Assert.assertEquals(State.INSTALLED, state);
}
+
+ //Should be in INSTALLED state when all NNs for at least one NS are INSTALLED
+ @Test
+ public void testServiceState_STOPPED_WITH_TWO_NS() throws Exception {
+ simulateNNFederation();
+ ServiceComponent nnComponent = service.getServiceComponent("NAMENODE");
+
+ updateServiceState(State.STARTED);
+
+ nnComponent.getServiceComponentHost("h3").setState(State.INSTALLED);
+ nnComponent.getServiceComponentHost("h4").setState(State.INSTALLED);
+
+ State state = serviceCalculatedState.getState(clusterName, getServiceName());
+ Assert.assertEquals(State.INSTALLED, state);
+ }
+
+ //Should be in STARTED state when at least one NN for each NS is STARTED
+ @Test
+ public void testServiceState_STARTED_WITH_TWO_NS() throws Exception {
+ simulateNNFederation();
+ ServiceComponent nnComponent = service.getServiceComponent("NAMENODE");
+
+ updateServiceState(State.STARTED);
+
+ nnComponent.getServiceComponentHost("h1").setState(State.INSTALLED);
+ nnComponent.getServiceComponentHost("h4").setState(State.INSTALLED);
+
+ State state = serviceCalculatedState.getState(clusterName, getServiceName());
+ Assert.assertEquals(State.STARTED, state);
+ }
+
+ private void simulateNNFederation() throws AmbariException {
+ HashMap<String, String> hdfsSiteProperties = new HashMap<>();
+ hdfsSiteProperties.put("dfs.internal.nameservices", "ns1,ns2");
+ hdfsSiteProperties.put("dfs.ha.namenodes.ns1", "nn1,nn2");
+ hdfsSiteProperties.put("dfs.ha.namenodes.ns2", "nn3,nn4");
+ hdfsSiteProperties.put("dfs.namenode.http-address.ns1.nn1", "h1:1234");
+ hdfsSiteProperties.put("dfs.namenode.http-address.ns1.nn2", "h2:1234");
+ hdfsSiteProperties.put("dfs.namenode.http-address.ns2.nn3", "h3:1234");
+ hdfsSiteProperties.put("dfs.namenode.http-address.ns2.nn4", "h4:1234");
+
+ ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+ Config config = configFactory.createNew(cluster, "hdfs-site", "version1",
+ hdfsSiteProperties, new HashMap<>());
+ cluster.addDesiredConfig("_test", Collections.singleton(config));
+
+ ServiceComponent nnComponent = service.getServiceComponent("NAMENODE");
+ ServiceComponent clientComponent = service.getServiceComponent("HDFS_CLIENT");
+ ServiceComponent jnComponent = service.addServiceComponent("JOURNALNODE");
+
+ List<String> newHosts = new ArrayList<>();
+ newHosts.add("h3");
+ newHosts.add("h4");
+
+ for (String hostName: newHosts){
+ clusters.addHost(hostName);
+ Host host = clusters.getHost(hostName);
+
+ Map<String, String> hostAttributes = new HashMap<>();
+ hostAttributes.put("os_family", "redhat");
+ hostAttributes.put("os_release_version", "6.3");
+ host.setHostAttributes(hostAttributes);
+ host.setState(HostState.HEALTHY);
+ clusters.mapHostToCluster(hostName, clusterName);
+
+ ServiceComponentHost sch = nnComponent.addServiceComponentHost(hostName);
+ sch.setVersion("2.1.1.0");
+ sch.setState(State.STARTED);
+
+ sch = jnComponent.addServiceComponentHost(hostName);
+ sch.setVersion("2.1.1.0");
+ sch.setState(State.INSTALLED);
+
+ sch = clientComponent.addServiceComponentHost(hostName);
+ sch.setVersion("2.1.1.0");
+ sch.setState(State.INSTALLED);
+ }
+ }
}