You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by ds...@apache.org on 2017/02/16 14:03:13 UTC
ambari git commit: AMBARI-20014 Multiple unit tests accessing same table which causes lock issues (dsen)
Repository: ambari
Updated Branches:
refs/heads/trunk ea82a59de -> b12ae51dd
AMBARI-20014 Multiple unit tests accessing same table which causes lock issues (dsen)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b12ae51d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b12ae51d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b12ae51d
Branch: refs/heads/trunk
Commit: b12ae51dd4a0c745827eea7ceb75c362e00d6f75
Parents: ea82a59
Author: Dmytro Sen <ds...@apache.org>
Authored: Thu Feb 16 16:03:05 2017 +0200
Committer: Dmytro Sen <ds...@apache.org>
Committed: Thu Feb 16 16:03:05 2017 +0200
----------------------------------------------------------------------
.../server/state/cluster/ClusterImpl.java | 2 +
.../AmbariManagementControllerTest.java | 121 ++++++++++---------
2 files changed, 64 insertions(+), 59 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/b12ae51d/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 2a66795..db4aa21 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -2090,6 +2090,8 @@ public class ClusterImpl implements Cluster {
try {
refresh();
deleteAllServices();
+
+ refresh(); // update one-to-many clusterServiceEntities
removeEntities();
allConfigs.clear();
} finally {
http://git-wip-us.apache.org/repos/asf/ambari/blob/b12ae51d/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index af67f05..89f9d94 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -151,6 +151,7 @@ import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartedEve
import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStopEvent;
import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStoppedEvent;
import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.ambari.server.utils.EventBusSynchronizer;
import org.apache.ambari.server.utils.StageUtils;
import org.apache.commons.collections.CollectionUtils;
import org.easymock.Capture;
@@ -199,7 +200,6 @@ public class AmbariManagementControllerTest {
private static final String FAKE_SERVICE_NAME = "FAKENAGIOS";
private static final int STACK_VERSIONS_CNT = 16;
private static final int REPOS_CNT = 3;
- private static final int STACKS_CNT = 3;
private static final int STACK_PROPERTIES_CNT = 103;
private static final int STACK_COMPONENTS_CNT = 4;
private static final int OS_CNT = 2;
@@ -258,6 +258,8 @@ public class AmbariManagementControllerTest {
@Before
public void setup() throws Exception {
+ EventBusSynchronizer.synchronizeAmbariEventPublisher(injector);
+
entityManager = injector.getProvider(EntityManager.class).get();
actionDB = injector.getInstance(ActionDBAccessor.class);
serviceFactory = injector.getInstance(ServiceFactory.class);
@@ -7235,10 +7237,14 @@ public class AmbariManagementControllerTest {
@Test
public void testGetStacks() throws Exception {
+ HashSet<String> availableStacks = new HashSet<>();
+ for (StackInfo stackInfo: ambariMetaInfo.getStacks()){
+ availableStacks.add(stackInfo.getName());
+ }
StackRequest request = new StackRequest(null);
Set<StackResponse> responses = controller.getStacks(Collections.singleton(request));
- Assert.assertEquals(STACKS_CNT, responses.size());
+ Assert.assertEquals(availableStacks.size(), responses.size());
StackRequest requestWithParams = new StackRequest(STACK_NAME);
Set<StackResponse> responsesWithParams = controller.getStacks(Collections.singleton(requestWithParams));
@@ -9256,84 +9262,81 @@ public class AmbariManagementControllerTest {
String HOST1 = getUniqueName();
String HOST2 = getUniqueName();
- try {
- Clusters clusters = injector.getInstance(Clusters.class);
+ Clusters clusters = injector.getInstance(Clusters.class);
- clusters.addHost(HOST1);
- Host host = clusters.getHost(HOST1);
- setOsFamily(host, "redhat", "6.3");
- clusters.getHost(HOST1).setState(HostState.HEALTHY);
+ clusters.addHost(HOST1);
+ Host host = clusters.getHost(HOST1);
+ setOsFamily(host, "redhat", "6.3");
+ clusters.getHost(HOST1).setState(HostState.HEALTHY);
- clusters.addHost(HOST2);
- host = clusters.getHost(HOST2);
- setOsFamily(host, "redhat", "6.3");
+ clusters.addHost(HOST2);
+ host = clusters.getHost(HOST2);
+ setOsFamily(host, "redhat", "6.3");
- AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
+ AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
- ClusterRequest cr = new ClusterRequest(null, CLUSTER_NAME, STACK_ID, null);
- amc.createCluster(cr);
+ ClusterRequest cr = new ClusterRequest(null, CLUSTER_NAME, STACK_ID, null);
+ amc.createCluster(cr);
- Long CLUSTER_ID = clusters.getCluster(CLUSTER_NAME).getClusterId();
+ Long CLUSTER_ID = clusters.getCluster(CLUSTER_NAME).getClusterId();
- ConfigurationRequest configRequest = new ConfigurationRequest(CLUSTER_NAME, "global", "version1",
- new HashMap<String, String>() {{ put("a", "b"); }}, null);
- cr.setDesiredConfig(Collections.singletonList(configRequest));
- cr.setClusterId(CLUSTER_ID);
- amc.updateClusters(Collections.singleton(cr), new HashMap<String, String>());
+ ConfigurationRequest configRequest = new ConfigurationRequest(CLUSTER_NAME, "global", "version1",
+ new HashMap<String, String>() {{ put("a", "b"); }}, null);
+ cr.setDesiredConfig(Collections.singletonList(configRequest));
+ cr.setClusterId(CLUSTER_ID);
+ amc.updateClusters(Collections.singleton(cr), new HashMap<String, String>());
- // add some hosts
- Set<HostRequest> hrs = new HashSet<HostRequest>();
- hrs.add(new HostRequest(HOST1, CLUSTER_NAME, null));
- HostResourceProviderTest.createHosts(amc, hrs);
+ // add some hosts
+ Set<HostRequest> hrs = new HashSet<HostRequest>();
+ hrs.add(new HostRequest(HOST1, CLUSTER_NAME, null));
+ HostResourceProviderTest.createHosts(amc, hrs);
- Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
- serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null));
- serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null));
- serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null));
+ Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
+ serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null));
+ serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null));
+ serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null));
- ServiceResourceProviderTest.createServices(amc, serviceRequests);
+ ServiceResourceProviderTest.createServices(amc, serviceRequests);
+
+ Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
+ serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "NAMENODE", null));
+ serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "SECONDARY_NAMENODE", null));
+ serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "DATANODE", null));
+ serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "MAPREDUCE2", "HISTORYSERVER", null));
+ serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "RESOURCEMANAGER", null));
+ serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "NODEMANAGER", null));
+ serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "HDFS_CLIENT", null));
- Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
- serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "NAMENODE", null));
- serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "SECONDARY_NAMENODE", null));
- serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "DATANODE", null));
- serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "MAPREDUCE2", "HISTORYSERVER", null));
- serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "RESOURCEMANAGER", null));
- serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "NODEMANAGER", null));
- serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "HDFS_CLIENT", null));
+ ComponentResourceProviderTest.createComponents(amc, serviceComponentRequests);
- ComponentResourceProviderTest.createComponents(amc, serviceComponentRequests);
+ Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
+ componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, "HDFS", "DATANODE", HOST1, null));
+ componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, "HDFS", "NAMENODE", HOST1, null));
+ componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, "HDFS", "SECONDARY_NAMENODE", HOST1, null));
+ componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, "MAPREDUCE2", "HISTORYSERVER", HOST1, null));
+ componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, "YARN", "RESOURCEMANAGER", HOST1, null));
+ componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, "YARN", "NODEMANAGER", HOST1, null));
+ componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, "HDFS", "HDFS_CLIENT", HOST1, null));
- Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
- componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, "HDFS", "DATANODE", HOST1, null));
- componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, "HDFS", "NAMENODE", HOST1, null));
- componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, "HDFS", "SECONDARY_NAMENODE", HOST1, null));
- componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, "MAPREDUCE2", "HISTORYSERVER", HOST1, null));
- componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, "YARN", "RESOURCEMANAGER", HOST1, null));
- componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, "YARN", "NODEMANAGER", HOST1, null));
- componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, "HDFS", "HDFS_CLIENT", HOST1, null));
+ amc.createHostComponents(componentHostRequests);
- amc.createHostComponents(componentHostRequests);
+ RequestResourceFilter resourceFilter = new RequestResourceFilter("HDFS", null, null);
+ ExecuteActionRequest ar = new ExecuteActionRequest(CLUSTER_NAME, Role.HDFS_SERVICE_CHECK.name(), null, false);
+ ar.getResourceFilters().add(resourceFilter);
+ amc.createAction(ar, null);
- RequestResourceFilter resourceFilter = new RequestResourceFilter("HDFS", null, null);
- ExecuteActionRequest ar = new ExecuteActionRequest(CLUSTER_NAME, Role.HDFS_SERVICE_CHECK.name(), null, false);
- ar.getResourceFilters().add(resourceFilter);
- amc.createAction(ar, null);
- // change mind, delete the cluster
- amc.deleteCluster(cr);
+ // change mind, delete the cluster
+ amc.deleteCluster(cr);
assertNotNull(clusters.getHost(HOST1));
assertNotNull(clusters.getHost(HOST2));
- HostDAO dao = injector.getInstance(HostDAO.class);
+ HostDAO dao = injector.getInstance(HostDAO.class);
- assertNotNull(dao.findByName(HOST1));
- assertNotNull(dao.findByName(HOST2));
+ assertNotNull(dao.findByName(HOST1));
+ assertNotNull(dao.findByName(HOST2));
- } finally {
-// injector.getInstance(PersistService.class).stop();
- }
}
@Test