You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by nc...@apache.org on 2017/02/17 20:52:20 UTC
ambari git commit: AMBARI-20066. 'Install' button is shown in Admin >
Stack and Versions > Version page for the original stack version installed
via Install Wizard (ncole)
Repository: ambari
Updated Branches:
refs/heads/trunk a5dc2d2cc -> f2cb1b6ef
AMBARI-20066. 'Install' button is shown in Admin > Stack and Versions > Version page for the original stack version installed via Install Wizard (ncole)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f2cb1b6e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f2cb1b6e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f2cb1b6e
Branch: refs/heads/trunk
Commit: f2cb1b6ef495d0708024749dfe5b702d270952a2
Parents: a5dc2d2
Author: Nate Cole <nc...@hortonworks.com>
Authored: Fri Feb 17 14:01:06 2017 -0500
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Fri Feb 17 15:52:15 2017 -0500
----------------------------------------------------------------------
.../server/state/cluster/ClusterImpl.java | 43 +++++++++--
.../server/state/cluster/ClusterTest.java | 78 +++++++++++++-------
2 files changed, 88 insertions(+), 33 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2cb1b6e/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index db4aa21..2d94f1a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -1313,7 +1313,7 @@ public class ClusterImpl implements Cluster {
* @return Return the effective Cluster Version State
*/
private RepositoryVersionState getEffectiveState(Map<RepositoryVersionState, Set<String>> stateToHosts) {
- if (stateToHosts == null || stateToHosts.keySet().size() < 1) {
+ if (stateToHosts == null || stateToHosts.size() < 1) {
return null;
}
@@ -1350,18 +1350,51 @@ public class ClusterImpl implements Cluster {
}
if (totalNotRequired > 0) {
- if (totalInstalled + totalNotRequired == totalHosts) {
- return RepositoryVersionState.INSTALLED;
+
+ // !!! if all we have is NOT_REQUIRED and something else, the return should be the something else
+ if (2 == stateToHosts.size()) {
+
+ Map<RepositoryVersionState, Set<String>> map = Maps.filterKeys(stateToHosts,
+ new com.google.common.base.Predicate<RepositoryVersionState>() {
+ @Override
+ public boolean apply(RepositoryVersionState repoState) {
+ return repoState != RepositoryVersionState.NOT_REQUIRED;
+ }
+ });
+
+ // !!! better be true
+ if (1 == map.size()) {
+ return map.keySet().iterator().next();
+ } else {
+ LOG.warn("The mix of NOT_REQUIRED hosts is unexpected: {}", stateToHosts);
+ return RepositoryVersionState.OUT_OF_SYNC;
+ }
}
- if (totalInstalling + totalInstalled + totalNotRequired == totalHosts) {
+ // if any hosts are still installing, then cluster is INSTALLING
+ if (totalInstalling > 0) {
return RepositoryVersionState.INSTALLING;
}
+ // if any hosts are install_failed, then cluster is INSTALL_FAILED
+ if (totalInstallFailed > 0) {
+ return RepositoryVersionState.INSTALL_FAILED;
+ }
+
+ // should be covered by the 2-state check above
+ if (totalInstalled > 0) {
+ return RepositoryVersionState.INSTALLED;
+ }
+
+ // rare
+ if (totalNotRequired == totalHosts) {
+ return RepositoryVersionState.NOT_REQUIRED;
+ }
+
}
// Also returns when we have a mix of CURRENT and INSTALLING|INSTALLED
- LOG.warn("have a mix of CURRENT and INSTALLING|INSTALLED host versions, " +
+ LOG.warn("Have a mix of CURRENT and INSTALLING|INSTALLED host versions, " +
"returning OUT_OF_SYNC as cluster version. Host version states: {}", stateToHosts);
return RepositoryVersionState.OUT_OF_SYNC;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2cb1b6e/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index 6cdfbad..396680a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -221,6 +221,10 @@ public class ClusterTest {
}
private void createDefaultCluster() throws Exception {
+ createDefaultCluster(Sets.newHashSet("h1", "h2"));
+ }
+
+ private void createDefaultCluster(Set<String> hostNames) throws Exception {
// TODO, use common function
StackId stackId = new StackId("HDP", "0.1");
StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
@@ -230,11 +234,10 @@ public class ClusterTest {
clusters.addCluster(clusterName, stackId);
- Map<String, String> hostAttributes = new HashMap<String, String>();
+ Map<String, String> hostAttributes = new HashMap<>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "5.9");
- Set<String> hostNames = new HashSet<String>() {{ add("h1"); add("h2"); }};
for (String hostName : hostNames) {
clusters.addHost(hostName);
@@ -279,7 +282,7 @@ public class ClusterTest {
host2.setIpv4("192.168.0.2");
host3.setIpv4("192.168.0.3");
- List<HostEntity> hostEntities = new ArrayList<HostEntity>();
+ List<HostEntity> hostEntities = new ArrayList<>();
hostEntities.add(host1);
hostEntities.add(host2);
@@ -309,7 +312,7 @@ public class ClusterTest {
when(stateEntity.getDesiredStack()).thenReturn(stackEntity);
clusterServiceEntity.setServiceDesiredStateEntity(stateEntity);
- List<ClusterServiceEntity> clusterServiceEntities = new ArrayList<ClusterServiceEntity>();
+ List<ClusterServiceEntity> clusterServiceEntities = new ArrayList<>();
clusterServiceEntities.add(clusterServiceEntity);
clusterEntity.setClusterServiceEntities(clusterServiceEntities);
return clusterEntity;
@@ -444,8 +447,8 @@ public class ClusterTest {
For some reason this still uses the metainfo.xml files for these services
from HDP-2.0.5 stack instead of the provided Stack Id
*/
- HashMap<String, Set<String>> componentsThatAdvertiseVersion = new HashMap<String, Set<String>>();
- HashMap<String, Set<String>> componentsThatDontAdvertiseVersion = new HashMap<String, Set<String>>();
+ HashMap<String, Set<String>> componentsThatAdvertiseVersion = new HashMap<>();
+ HashMap<String, Set<String>> componentsThatDontAdvertiseVersion = new HashMap<>();
Set<String> hdfsComponents = new HashSet<String>() {{ add("NAMENODE"); add("DATANODE"); add("HDFS_CLIENT"); }};
Set<String> zkComponents = new HashSet<String>() {{ add("ZOOKEEPER_SERVER"); add("ZOOKEEPER_CLIENT"); }};
@@ -567,7 +570,7 @@ public class ClusterTest {
hostInfo.setMemoryTotal(10);
hostInfo.setMemorySize(100);
hostInfo.setProcessorCount(10);
- List<DiskInfo> mounts = new ArrayList<DiskInfo>();
+ List<DiskInfo> mounts = new ArrayList<>();
mounts.add(new DiskInfo("/dev/sda", "/mnt/disk1",
"5000000", "4000000", "10%", "size", "fstype"));
hostInfo.setMounts(mounts);
@@ -839,7 +842,7 @@ public class ClusterTest {
Assert.assertEquals(1, componentHostMap.get("JOBTRACKER").size());
Assert.assertTrue(componentHostMap.get("JOBTRACKER").contains("h1"));
- componentHostMap = c1.getServiceComponentHostMap(null, new HashSet<String>(Arrays.asList("HDFS", "MAPREDUCE")));
+ componentHostMap = c1.getServiceComponentHostMap(null, new HashSet<>(Arrays.asList("HDFS", "MAPREDUCE")));
Assert.assertEquals(3, componentHostMap.size());
Assert.assertEquals(1, componentHostMap.get("NAMENODE").size());
Assert.assertTrue(componentHostMap.get("NAMENODE").contains("h1"));
@@ -896,7 +899,7 @@ public class ClusterTest {
Assert.assertEquals(1, componentHostMap.get("DATANODE").size());
Assert.assertTrue(componentHostMap.get("DATANODE").contains("h2"));
- componentHostMap = c1.getServiceComponentHostMap(new HashSet<String>(Arrays.asList("h1", "h2", "h3")), null);
+ componentHostMap = c1.getServiceComponentHostMap(new HashSet<>(Arrays.asList("h1", "h2", "h3")), null);
Assert.assertEquals(3, componentHostMap.size());
Assert.assertEquals(1, componentHostMap.get("NAMENODE").size());
Assert.assertTrue(componentHostMap.get("NAMENODE").contains("h1"));
@@ -959,10 +962,10 @@ public class ClusterTest {
public void testGetAndSetConfigs() throws Exception {
createDefaultCluster();
- Map<String, Map<String, String>> c1PropAttributes = new HashMap<String, Map<String,String>>();
+ Map<String, Map<String, String>> c1PropAttributes = new HashMap<>();
c1PropAttributes.put("final", new HashMap<String, String>());
c1PropAttributes.get("final").put("a", "true");
- Map<String, Map<String, String>> c2PropAttributes = new HashMap<String, Map<String,String>>();
+ Map<String, Map<String, String>> c2PropAttributes = new HashMap<>();
c2PropAttributes.put("final", new HashMap<String, String>());
c2PropAttributes.get("final").put("x", "true");
Config config1 = configFactory.createNew(c1, "global", "version1",
@@ -1068,7 +1071,7 @@ public class ClusterTest {
host.setIPv4("ipv4");
host.setIPv6("ipv6");
- Map<String, String> hostAttributes = new HashMap<String, String>();
+ Map<String, String> hostAttributes = new HashMap<>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "5.9");
host.setHostAttributes(hostAttributes);
@@ -1129,7 +1132,7 @@ public class ClusterTest {
Config config2 = configFactory.createNew(c1, "core-site", "version2",
new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
- Set<Config> configs = new HashSet<Config>();
+ Set<Config> configs = new HashSet<>();
configs.add(config1);
configs.add(config2);
@@ -1190,7 +1193,7 @@ public class ClusterTest {
Host host1 = clusters.getHost("h1");
HostEntity hostEntity1 = hostDAO.findByName("h1");
- Map<String, Map<String, String>> propAttributes = new HashMap<String, Map<String,String>>();
+ Map<String, Map<String, String>> propAttributes = new HashMap<>();
propAttributes.put("final", new HashMap<String, String>());
propAttributes.get("final").put("test", "true");
Config config = configFactory.createNew(c1, "hdfs-site", "1", new HashMap<String, String>(){{
@@ -1204,7 +1207,7 @@ public class ClusterTest {
assertTrue(configs.containsKey(hostEntity1.getHostId()));
assertEquals(1, configs.get(hostEntity1.getHostId()).size());
- List<Long> hostIds = new ArrayList<Long>();
+ List<Long> hostIds = new ArrayList<>();
hostIds.add(hostEntity1.getHostId());
configs = c1.getHostsDesiredConfigs(hostIds);
@@ -1294,7 +1297,7 @@ public class ClusterTest {
Config config2 = configFactory.createNew(c1, "core-site", "version2",
new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
- Set<Config> configs = new HashSet<Config>();
+ Set<Config> configs = new HashSet<>();
configs.add(config1);
configs.add(config2);
@@ -1853,7 +1856,7 @@ public class ClusterTest {
@Test
public void testRecalculateClusterVersionStateWithNotRequired() throws Exception {
- createDefaultCluster();
+ createDefaultCluster(Sets.newHashSet("h1", "h2", "h3"));
Host h1 = clusters.getHost("h1");
h1.setState(HostState.HEALTHY);
@@ -1861,6 +1864,9 @@ public class ClusterTest {
Host h2 = clusters.getHost("h2");
h2.setState(HostState.HEALTHY);
+ Host h3 = clusters.getHost("h3");
+ h3.setState(HostState.HEALTHY);
+
// Phase 1: Install bits during distribution
StackId stackId = new StackId("HDP-0.1");
final String stackVersion = "0.1-1000";
@@ -1872,13 +1878,29 @@ public class ClusterTest {
RepositoryVersionState.INSTALLING);
c1.setCurrentStackVersion(stackId);
- HostVersionEntity hv1 = helper.createHostVersion("h1", repositoryVersionEntity, RepositoryVersionState.INSTALLED);
- HostVersionEntity hv2 = helper.createHostVersion("h2", repositoryVersionEntity, RepositoryVersionState.NOT_REQUIRED);
+ HostVersionEntity hv1 = helper.createHostVersion("h1", repositoryVersionEntity, RepositoryVersionState.NOT_REQUIRED);
+ HostVersionEntity hv2 = helper.createHostVersion("h2", repositoryVersionEntity, RepositoryVersionState.INSTALLING);
+ HostVersionEntity hv3 = helper.createHostVersion("h3", repositoryVersionEntity, RepositoryVersionState.INSTALLED);
c1.recalculateClusterVersionState(repositoryVersionEntity);
- //Should remain in its current state
- checkStackVersionState(stackId, stackVersion,
- RepositoryVersionState.INSTALLED);
+ ClusterVersionEntity cv = clusterVersionDAO.findByClusterAndStackAndVersion(c1.getClusterName(), stackId, stackVersion);
+ assertEquals(RepositoryVersionState.INSTALLING, cv.getState());
+
+ // 1 in NOT_REQUIRED, 1 in INSTALLED, 1 in CURRENT so should be INSTALLED
+ hv2.setState(RepositoryVersionState.CURRENT);
+ hostVersionDAO.merge(hv2);
+
+ c1.recalculateClusterVersionState(repositoryVersionEntity);
+ cv = clusterVersionDAO.findByClusterAndStackAndVersion(c1.getClusterName(), stackId, stackVersion);
+ assertEquals(RepositoryVersionState.INSTALLED, cv.getState());
+
+ // 1 in NOT_REQUIRED, and 2 in CURRENT, so cluster version should be CURRENT
+ hv3.setState(RepositoryVersionState.CURRENT);
+ hostVersionDAO.merge(hv3);
+
+ c1.recalculateClusterVersionState(repositoryVersionEntity);
+ cv = clusterVersionDAO.findByClusterAndStackAndVersion(c1.getClusterName(), stackId, stackVersion);
+ assertEquals(RepositoryVersionState.CURRENT, cv.getState());
}
@@ -1949,7 +1971,7 @@ public class ClusterTest {
RepositoryVersionEntity rv1 = helper.getOrCreateRepositoryVersion(stackId, v1);
- Map<String, String> hostAttributes = new HashMap<String, String>();
+ Map<String, String> hostAttributes = new HashMap<>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6.4");
@@ -2106,7 +2128,7 @@ public class ClusterTest {
RepositoryVersionEntity rv1 = helper.getOrCreateRepositoryVersion(stackId, v1);
- Map<String, String> hostAttributes = new HashMap<String, String>();
+ Map<String, String> hostAttributes = new HashMap<>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6.4");
@@ -2180,7 +2202,7 @@ public class ClusterTest {
h.setIPv4("ipv4");
h.setIPv6("ipv6");
- Map<String, String> hostAttributes = new HashMap<String, String>();
+ Map<String, String> hostAttributes = new HashMap<>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "5.9");
h.setHostAttributes(hostAttributes);
@@ -2249,7 +2271,7 @@ public class ClusterTest {
h.setIPv4("ipv4");
h.setIPv6("ipv6");
- Map<String, String> hostAttributes = new HashMap<String, String>();
+ Map<String, String> hostAttributes = new HashMap<>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "5.9");
h.setHostAttributes(hostAttributes);
@@ -2582,8 +2604,8 @@ public class ClusterTest {
// make sure the stacks are different
Assert.assertFalse(stackId.equals(newStackId));
- Map<String, String> properties = new HashMap<String, String>();
- Map<String, Map<String, String>> propertiesAttributes = new HashMap<String, Map<String, String>>();
+ Map<String, String> properties = new HashMap<>();
+ Map<String, Map<String, String>> propertiesAttributes = new HashMap<>();
// foo-type for v1 on current stack
properties.put("foo-property-1", "foo-value-1");