You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by jl...@apache.org on 2016/02/26 00:05:12 UTC
[1/3] ambari git commit: AMBARI-15169: namenode_ha_utils.py sometimes returns
Active NNs as an empty set,
and Stand-by NN as a set of 2 NNs in a multi-homed environment (jluniya)
Repository: ambari
Updated Branches:
refs/heads/branch-2.2 844abe636 -> 15933088c
AMBARI-15169: namenode_ha_utils.py sometimes returns Active NNs as an empty set, and Stand-by NN as a set of 2 NNs in a multi-homed environment (jluniya)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/060babc8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/060babc8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/060babc8
Branch: refs/heads/branch-2.2
Commit: 060babc872a8697dd56e9ff83fa19420c8451b4d
Parents: 844abe6
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Thu Feb 25 14:50:11 2016 -0800
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Thu Feb 25 15:04:39 2016 -0800
----------------------------------------------------------------------
.../libraries/functions/namenode_ha_utils.py | 29 ++++++++++++++++----
.../package/alerts/alert_ha_namenode_health.py | 14 ++++++++--
2 files changed, 36 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/060babc8/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
index 0920e85..36a34c1 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
@@ -33,7 +33,10 @@ HDFS_NN_STATE_STANDBY = 'standby'
NAMENODE_HTTP_FRAGMENT = 'dfs.namenode.http-address.{0}.{1}'
NAMENODE_HTTPS_FRAGMENT = 'dfs.namenode.https-address.{0}.{1}'
+NAMENODE_RPC_FRAGMENT = 'dfs.namenode.rpc-address.{0}.{1}'
+NAMENODE_RPC_NON_HA = 'dfs.namenode.rpc-address'
JMX_URI_FRAGMENT = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem"
+INADDR_ANY = '0.0.0.0'
def get_namenode_states(hdfs_site, security_enabled, run_user, times=10, sleep_time=1, backoff_factor=2):
"""
@@ -73,7 +76,8 @@ def get_namenode_states_noretries(hdfs_site, security_enabled, run_user):
nn_unique_ids = hdfs_site[nn_unique_ids_key].split(',')
for nn_unique_id in nn_unique_ids:
is_https_enabled = hdfs_site['dfs.https.enable'] if not is_empty(hdfs_site['dfs.https.enable']) else False
-
+
+ rpc_key = NAMENODE_RPC_FRAGMENT.format(name_service,nn_unique_id)
if not is_https_enabled:
key = NAMENODE_HTTP_FRAGMENT.format(name_service,nn_unique_id)
protocol = "http"
@@ -84,6 +88,11 @@ def get_namenode_states_noretries(hdfs_site, security_enabled, run_user):
if key in hdfs_site:
# use str() to ensure that unicode strings do not have the u' in them
value = str(hdfs_site[key])
+ if INADDR_ANY in value and rpc_key in hdfs_site:
+ rpc_value = str(hdfs_site[rpc_key])
+ if INADDR_ANY not in rpc_value:
+ rpc_host = rpc_value.split(":")[0]
+ value = value.replace(INADDR_ANY, rpc_host)
jmx_uri = JMX_URI_FRAGMENT.format(protocol, value)
@@ -139,6 +148,8 @@ def get_property_for_active_namenode(hdfs_site, property_name, security_enabled,
- In non-ha mode it will return hdfs_site[dfs.namenode.rpc-address]
- In ha-mode it will return hdfs_site[dfs.namenode.rpc-address.nnha.nn2], where nnha is the name of HA, and nn2 is id of active NN
"""
+ value = None
+ rpc_key = None
if is_ha_enabled(hdfs_site):
name_service = hdfs_site['dfs.nameservices']
active_namenodes = get_namenode_states(hdfs_site, security_enabled, run_user)[0]
@@ -147,8 +158,16 @@ def get_property_for_active_namenode(hdfs_site, property_name, security_enabled,
raise Fail("There is no active namenodes.")
active_namenode_id = active_namenodes[0][0]
-
- return hdfs_site[format("{property_name}.{name_service}.{active_namenode_id}")]
+ value = hdfs_site[format("{property_name}.{name_service}.{active_namenode_id}")]
+ rpc_key = NAMENODE_RPC_FRAGMENT.format(name_service,active_namenode_id)
else:
- return hdfs_site[property_name]
-
+ value = hdfs_site[property_name]
+ rpc_key = NAMENODE_RPC_NON_HA
+
+ if INADDR_ANY in value and rpc_key in hdfs_site:
+ rpc_value = str(hdfs_site[rpc_key])
+ if INADDR_ANY not in rpc_value:
+ rpc_host = rpc_value.split(":")[0]
+ value = value.replace(INADDR_ANY, rpc_host)
+
+ return value
http://git-wip-us.apache.org/repos/asf/ambari/blob/060babc8/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
index 7cd5591..a174cb4 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
@@ -45,6 +45,10 @@ KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+INADDR_ANY = '0.0.0.0'
+NAMENODE_HTTP_FRAGMENT = 'dfs.namenode.http-address.{0}.{1}'
+NAMENODE_HTTPS_FRAGMENT = 'dfs.namenode.https-address.{0}.{1}'
+NAMENODE_RPC_FRAGMENT = 'dfs.namenode.rpc-address.{0}.{1}'
CONNECTION_TIMEOUT_KEY = 'connection.timeout'
CONNECTION_TIMEOUT_DEFAULT = 5.0
@@ -122,11 +126,11 @@ def execute(configurations={}, parameters={}, host_name=None):
if not nn_unique_ids_key in hdfs_site:
return (RESULT_STATE_UNKNOWN, ['Unable to find unique namenode alias key {0}'.format(nn_unique_ids_key)])
- namenode_http_fragment = 'dfs.namenode.http-address.{0}.{1}'
+ namenode_http_fragment = NAMENODE_HTTP_FRAGMENT
jmx_uri_fragment = "http://{0}/jmx?qry=Hadoop:service=NameNode,name=*"
if is_ssl_enabled:
- namenode_http_fragment = 'dfs.namenode.https-address.{0}.{1}'
+ namenode_http_fragment = NAMENODE_HTTPS_FRAGMENT
jmx_uri_fragment = "https://{0}/jmx?qry=Hadoop:service=NameNode,name=*"
@@ -140,10 +144,16 @@ def execute(configurations={}, parameters={}, host_name=None):
nn_unique_ids = hdfs_site[nn_unique_ids_key].split(',')
for nn_unique_id in nn_unique_ids:
key = namenode_http_fragment.format(name_service,nn_unique_id)
+ rpc_key = NAMENODE_RPC_FRAGMENT.format(name_service,nn_unique_id)
if key in hdfs_site:
# use str() to ensure that unicode strings do not have the u' in them
value = str(hdfs_site[key])
+ if INADDR_ANY in value and rpc_key in hdfs_site:
+ rpc_value = str(hdfs_site[rpc_key])
+ if INADDR_ANY not in rpc_value:
+ rpc_host = rpc_value.split(":")[0]
+ value = value.replace(INADDR_ANY, rpc_host)
try:
jmx_uri = jmx_uri_fragment.format(value)
[3/3] ambari git commit: AMBARI-15168: HBASE_OPTS resets during
Ambari 2.2 upgrade (Ajit Kumar via jluniya)
Posted by jl...@apache.org.
AMBARI-15168: HBASE_OPTS resets during Ambari 2.2 upgrade (Ajit Kumar via jluniya)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/15933088
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/15933088
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/15933088
Branch: refs/heads/branch-2.2
Commit: 15933088c5981c08600a176514282a7283ed14df
Parents: 82a914f
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Thu Feb 25 15:03:18 2016 -0800
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Thu Feb 25 15:05:08 2016 -0800
----------------------------------------------------------------------
.../java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/15933088/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
index a434429..2330a21 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
@@ -1017,7 +1017,7 @@ public class UpgradeCatalog220 extends AbstractUpgradeCatalog {
String content = hbaseEnvConfig.getProperties().get(CONTENT_PROPERTY);
if (content != null) {
if (!content.contains("-Djava.io.tmpdir")) {
- content += "\n\nexport HBASE_OPTS=\"-Djava.io.tmpdir={{java_io_tmpdir}}\"";
+ content += "\n\nexport HBASE_OPTS=\"${HBASE_OPTS} -Djava.io.tmpdir={{java_io_tmpdir}}\"";
updateConfig = true;
}
if (stackId != null && stackId.getStackName().equals("HDP") &&
[2/3] ambari git commit: AMBARI-15167: Host API fails with
multi-value filter (Ajit Kumar via jluniya)
Posted by jl...@apache.org.
AMBARI-15167: Host API fails with multi-value filter (Ajit Kumar via jluniya)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/82a914f5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/82a914f5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/82a914f5
Branch: refs/heads/branch-2.2
Commit: 82a914f5580c2bf4cd5a36714237041fa240fbb8
Parents: 060babc
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Thu Feb 25 15:01:32 2016 -0800
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Thu Feb 25 15:04:59 2016 -0800
----------------------------------------------------------------------
.../HostStackVersionResourceProvider.java | 89 ++++++------
.../HostStackVersionResourceProviderTest.java | 135 +++++++++++--------
2 files changed, 119 insertions(+), 105 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/82a914f5/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
index c7e3ca9..1cd9c0a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
@@ -28,6 +28,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
+import com.google.common.collect.Sets;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.StaticallyInject;
import org.apache.ambari.server.actionmanager.ActionManager;
@@ -68,6 +69,7 @@ import org.apache.ambari.server.utils.StageUtils;
import com.google.gson.Gson;
import com.google.inject.Inject;
import com.google.inject.Provider;
+import org.apache.commons.lang.Validate;
/**
* Resource provider for host stack versions resources.
@@ -90,33 +92,24 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
protected static final String INSTALL_PACKAGES_FULL_NAME = "Install version";
- @SuppressWarnings("serial")
- private static Set<String> pkPropertyIds = new HashSet<String>() {
- {
- add(HOST_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
- add(HOST_STACK_VERSION_HOST_NAME_PROPERTY_ID);
- add(HOST_STACK_VERSION_ID_PROPERTY_ID);
- add(HOST_STACK_VERSION_STACK_PROPERTY_ID);
- add(HOST_STACK_VERSION_VERSION_PROPERTY_ID);
- add(HOST_STACK_VERSION_REPO_VERSION_PROPERTY_ID);
- }
- };
-
- @SuppressWarnings("serial")
- private static Set<String> propertyIds = new HashSet<String>() {
- {
- add(HOST_STACK_VERSION_ID_PROPERTY_ID);
- add(HOST_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
- add(HOST_STACK_VERSION_HOST_NAME_PROPERTY_ID);
- add(HOST_STACK_VERSION_STACK_PROPERTY_ID);
- add(HOST_STACK_VERSION_VERSION_PROPERTY_ID);
- add(HOST_STACK_VERSION_STATE_PROPERTY_ID);
- add(HOST_STACK_VERSION_REPOSITORIES_PROPERTY_ID);
- add(HOST_STACK_VERSION_REPO_VERSION_PROPERTY_ID);
- }
- };
+ private static Set<String> pkPropertyIds = Sets.newHashSet(
+ HOST_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID,
+ HOST_STACK_VERSION_HOST_NAME_PROPERTY_ID,
+ HOST_STACK_VERSION_ID_PROPERTY_ID,
+ HOST_STACK_VERSION_STACK_PROPERTY_ID,
+ HOST_STACK_VERSION_VERSION_PROPERTY_ID,
+ HOST_STACK_VERSION_REPO_VERSION_PROPERTY_ID);
+
+ private static Set<String> propertyIds = Sets.newHashSet(
+ HOST_STACK_VERSION_ID_PROPERTY_ID,
+ HOST_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID,
+ HOST_STACK_VERSION_HOST_NAME_PROPERTY_ID,
+ HOST_STACK_VERSION_STACK_PROPERTY_ID,
+ HOST_STACK_VERSION_VERSION_PROPERTY_ID,
+ HOST_STACK_VERSION_STATE_PROPERTY_ID,
+ HOST_STACK_VERSION_REPOSITORIES_PROPERTY_ID,
+ HOST_STACK_VERSION_REPO_VERSION_PROPERTY_ID);
- @SuppressWarnings("serial")
private static Map<Type, String> keyPropertyIds = new HashMap<Type, String>() {
{
put(Type.Cluster, HOST_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
@@ -160,7 +153,7 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
@Override
public Set<Resource> getResources(Request request, Predicate predicate) throws
SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
- final Set<Resource> resources = new HashSet<Resource>();
+ final Set<Resource> resources = new HashSet<>();
final Set<String> requestedIds = getRequestPropertyIds(request, predicate);
final Set<Map<String, Object>> propertyMaps = getPropertyMaps(predicate);
@@ -171,8 +164,8 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
clusterName = propertyMap.get(HOST_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID).toString();
}
final Long id;
- List<HostVersionEntity> requestedEntities = new ArrayList<HostVersionEntity>();
- if (propertyMap.get(HOST_STACK_VERSION_ID_PROPERTY_ID) == null && propertyMaps.size() == 1) {
+ List<HostVersionEntity> requestedEntities;
+ if (propertyMap.get(HOST_STACK_VERSION_ID_PROPERTY_ID) == null) {
if (clusterName == null) {
requestedEntities = hostVersionDAO.findByHost(hostName);
} else {
@@ -188,12 +181,12 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
if (entity == null) {
throw new NoSuchResourceException("There is no stack version with id " + id);
} else {
- requestedEntities.add(entity);
+ requestedEntities = Collections.singletonList(entity);
}
}
-
- addRequestedEntities(resources, requestedEntities, requestedIds, clusterName);
-
+ if (requestedEntities != null) {
+ addRequestedEntities(resources, requestedEntities, requestedIds, clusterName);
+ }
}
return resources;
@@ -254,20 +247,17 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
Map<String, Object> propertyMap = iterator.next();
- Set<String> requiredProperties = new HashSet<String>(){{
- add(HOST_STACK_VERSION_HOST_NAME_PROPERTY_ID);
- add(HOST_STACK_VERSION_REPO_VERSION_PROPERTY_ID);
- add(HOST_STACK_VERSION_STACK_PROPERTY_ID);
- add(HOST_STACK_VERSION_VERSION_PROPERTY_ID);
- }};
+ Set<String> requiredProperties = Sets.newHashSet(
+ HOST_STACK_VERSION_HOST_NAME_PROPERTY_ID,
+ HOST_STACK_VERSION_REPO_VERSION_PROPERTY_ID,
+ HOST_STACK_VERSION_STACK_PROPERTY_ID,
+ HOST_STACK_VERSION_VERSION_PROPERTY_ID);
for (String requiredProperty : requiredProperties) {
- if (! propertyMap.containsKey(requiredProperty)) {
- throw new IllegalArgumentException(
- String.format("The required property %s is not defined",
- requiredProperty));
- }
+ Validate.isTrue(propertyMap.containsKey(requiredProperty),
+ String.format("The required property %s is not defined", requiredProperty));
}
+
String clName = (String) propertyMap.get(HOST_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
hostName = (String) propertyMap.get(HOST_STACK_VERSION_HOST_NAME_PROPERTY_ID);
desiredRepoVersion = (String) propertyMap.get(HOST_STACK_VERSION_REPO_VERSION_PROPERTY_ID);
@@ -348,7 +338,7 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
}
List<OperatingSystemEntity> operatingSystems = repoVersionEnt.getOperatingSystems();
- Map<String, List<RepositoryEntity>> perOsRepos = new HashMap<String, List<RepositoryEntity>>();
+ Map<String, List<RepositoryEntity>> perOsRepos = new HashMap<>();
for (OperatingSystemEntity operatingSystem : operatingSystems) {
perOsRepos.put(operatingSystem.getOsType(), operatingSystem.getRepositories());
}
@@ -362,8 +352,8 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
osFamily, desiredRepoVersion, stackId));
}
// For every host at cluster, determine packages for all installed services
- List<ServiceOsSpecific.Package> packages = new ArrayList<ServiceOsSpecific.Package>();
- Set<String> servicesOnHost = new HashSet<String>();
+ List<ServiceOsSpecific.Package> packages = new ArrayList<>();
+ Set<String> servicesOnHost = new HashSet<>();
List<ServiceComponentHost> components = cluster.getServiceComponentHosts(host.getHostName());
for (ServiceComponentHost component : components) {
servicesOnHost.add(component.getServiceName());
@@ -417,7 +407,7 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
String caption = String.format(INSTALL_PACKAGES_FULL_NAME + " on host %s", hostName);
RequestStageContainer req = createRequest(caption);
- Map<String, String> hostLevelParams = new HashMap<String, String>();
+ Map<String, String> hostLevelParams = new HashMap<>();
hostLevelParams.put(JDK_LOCATION, getManagementController().getJdkResourceUrl());
// Generate cluster host info
@@ -455,7 +445,6 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
hostVersEntity.setState(RepositoryVersionState.INSTALLING);
hostVersionDAO.merge(hostVersEntity);
- StackId desiredStackId = cluster.getDesiredStackVersion();
cluster.recalculateClusterVersionState(repoVersionEnt);
req.persist();
} catch (AmbariException e) {
@@ -470,7 +459,7 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
RequestStageContainer requestStages = new RequestStageContainer(
actionManager.getNextRequestId(), null, requestFactory, actionManager);
- requestStages.setRequestContext(String.format(caption));
+ requestStages.setRequestContext(caption);
return requestStages;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/82a914f5/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProviderTest.java
index 0824ae1..4fcfce2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProviderTest.java
@@ -18,16 +18,18 @@
package org.apache.ambari.server.controller.internal;
+import static junit.framework.Assert.assertEquals;
import static org.easymock.EasyMock.anyLong;
import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.createNiceMock;
import static org.easymock.EasyMock.eq;
import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.isA;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
-import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
@@ -40,12 +42,13 @@ import org.apache.ambari.server.actionmanager.ActionManager;
import org.apache.ambari.server.actionmanager.HostRoleCommand;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
import org.apache.ambari.server.controller.RequestStatusResponse;
import org.apache.ambari.server.controller.ResourceProviderFactory;
+import org.apache.ambari.server.controller.spi.Predicate;
import org.apache.ambari.server.controller.spi.Request;
import org.apache.ambari.server.controller.spi.Resource;
import org.apache.ambari.server.controller.spi.ResourceProvider;
+import org.apache.ambari.server.controller.utilities.PredicateBuilder;
import org.apache.ambari.server.controller.utilities.PropertyHelper;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
@@ -53,6 +56,7 @@ import org.apache.ambari.server.orm.dao.HostVersionDAO;
import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
import org.apache.ambari.server.orm.entities.HostVersionEntity;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.ConfigHelper;
@@ -69,7 +73,6 @@ import org.apache.ambari.server.utils.StageUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
-import org.powermock.core.classloader.annotations.PrepareForTest;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
@@ -80,8 +83,6 @@ import com.google.inject.util.Modules;
/**
* ClusterStackVersionResourceProvider tests.
*/
-//@RunWith(PowerMockRunner.class)
-@PrepareForTest(AmbariManagementControllerImpl.class)
public class HostStackVersionResourceProviderTest {
private Injector injector;
@@ -89,6 +90,16 @@ public class HostStackVersionResourceProviderTest {
private RepositoryVersionDAO repositoryVersionDAOMock;
private HostVersionDAO hostVersionDAOMock;
private ConfigHelper configHelper;
+ private AmbariManagementController managementController;
+ private Clusters clusters;
+ private Cluster cluster;
+ private RequestStatusResponse response;
+ private ResourceProviderFactory resourceProviderFactory;
+ private ResourceProvider csvResourceProvider;
+ private ActionManager actionManager;
+ private HostVersionEntity hostVersionEntityMock;
+ private RepositoryVersionEntity repoVersion;
+ private Resource.Type type = Resource.Type.HostStackVersion;
private String operatingSystemsJson = "[\n" +
" {\n" +
@@ -119,6 +130,22 @@ public class HostStackVersionResourceProviderTest {
injector = Guice.createInjector(Modules.override(module).with(new MockModule()));
injector.getInstance(GuiceJpaInitializer.class);
ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
+ managementController = createMock(AmbariManagementController.class);
+ clusters = createNiceMock(Clusters.class);
+ cluster = createNiceMock(Cluster.class);
+ response = createNiceMock(RequestStatusResponse.class);
+ resourceProviderFactory = createNiceMock(ResourceProviderFactory.class);
+ csvResourceProvider = createNiceMock(ClusterStackVersionResourceProvider.class);
+ hostVersionEntityMock = createNiceMock(HostVersionEntity.class);
+ actionManager = createNiceMock(ActionManager.class);
+ repoVersion = new RepositoryVersionEntity();
+ repoVersion.setOperatingSystems(operatingSystemsJson);
+
+ StackEntity stack = new StackEntity();
+ stack.setStackName("HDP");
+ stack.setStackVersion("2.2");
+ repoVersion.setStack(stack);
+ repoVersion.setVersion("2.2");
}
@After
@@ -128,50 +155,66 @@ public class HostStackVersionResourceProviderTest {
@Test
- public void testCreateResources() throws Exception {
- Resource.Type type = Resource.Type.HostStackVersion;
+ public void testGetResources() throws Exception {
+ // add the property map to a set for the request. add more maps for multiple creates
+ String hostname = "host1";
+ String clustername = "Cluster100";
+
+ Predicate predicate = new PredicateBuilder().begin()
+ .property(HostStackVersionResourceProvider.HOST_STACK_VERSION_HOST_NAME_PROPERTY_ID).equals(hostname)
+ .and()
+ .property(HostStackVersionResourceProvider.HOST_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID).equals(clustername)
+ .end().and().begin()
+ .property(HostStackVersionResourceProvider.HOST_STACK_VERSION_STATE_PROPERTY_ID).equals("INSTALLING")
+ .or()
+ .property(HostStackVersionResourceProvider.HOST_STACK_VERSION_STATE_PROPERTY_ID).equals("INSTALL_FAILED")
+ .or()
+ .property(HostStackVersionResourceProvider.HOST_STACK_VERSION_STATE_PROPERTY_ID).equals("OUT_OF_SYNC")
+ .end().toPredicate();
+ // create the request
+ Request request = PropertyHelper.getCreateRequest(Collections.<Map<String,Object>>emptySet(), null);
+ ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
+ type,
+ PropertyHelper.getPropertyIds(type),
+ PropertyHelper.getKeyPropertyIds(type),
+ managementController);
+
+ expect(hostVersionDAOMock.findByClusterAndHost(clustername, hostname)).andReturn(Collections.singletonList(hostVersionEntityMock));
+ expect(hostVersionEntityMock.getRepositoryVersion()).andReturn(repoVersion).anyTimes();
+ expect(repositoryVersionDAOMock.findByStackAndVersion(isA(StackId.class), isA(String.class))).andReturn(null).anyTimes();
+ expect(hostVersionEntityMock.getState()).andReturn(RepositoryVersionState.INSTALLING).anyTimes();
- AmbariManagementController managementController = createMock(AmbariManagementController.class);
- Clusters clusters = createNiceMock(Clusters.class);
- Cluster cluster = createNiceMock(Cluster.class);
+ replay(hostVersionDAOMock, hostVersionEntityMock, repositoryVersionDAOMock);
+ Set<Resource> resources = provider.getResources(request, predicate);
+ assertEquals(1, resources.size());
+ verify(hostVersionDAOMock, hostVersionEntityMock, repositoryVersionDAOMock);
+ }
+
+ @Test
+ public void testCreateResources() throws Exception {
StackId stackId = new StackId("HDP", "2.0.1");
final Host host1 = createNiceMock("host1", Host.class);
expect(host1.getHostName()).andReturn("host1").anyTimes();
expect(host1.getOsFamily()).andReturn("redhat6").anyTimes();
replay(host1);
- Map<String, Host> hostsForCluster = new HashMap<String, Host>() {{
- put(host1.getHostName(), host1);
- }};
+ Map<String, Host> hostsForCluster = Collections.singletonMap(host1.getHostName(), host1);
ServiceComponentHost sch = createMock(ServiceComponentHost.class);
List<ServiceComponentHost> schs = Collections.singletonList(sch);
- RepositoryVersionEntity repoVersion = new RepositoryVersionEntity();
- repoVersion.setOperatingSystems(operatingSystemsJson);
+
final ServiceOsSpecific.Package hivePackage = new ServiceOsSpecific.Package();
hivePackage.setName("hive");
final ServiceOsSpecific.Package mysqlPackage = new ServiceOsSpecific.Package();
mysqlPackage.setName("mysql");
mysqlPackage.setSkipUpgrade(Boolean.TRUE);
- List<ServiceOsSpecific.Package> packages = new ArrayList<ServiceOsSpecific.Package>() {{
- add(hivePackage);
- add(mysqlPackage);
- }};
-
-
- ActionManager actionManager = createNiceMock(ActionManager.class);
-
- RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
- ResourceProviderFactory resourceProviderFactory = createNiceMock(ResourceProviderFactory.class);
- ResourceProvider csvResourceProvider = createNiceMock(ClusterStackVersionResourceProvider.class);
-
- HostVersionEntity hostVersionEntityMock = createNiceMock(HostVersionEntity.class);
+ List<ServiceOsSpecific.Package> packages = Arrays.asList(hivePackage, mysqlPackage);
AbstractControllerResourceProvider.init(resourceProviderFactory);
- Map<String, Map<String, String>> hostConfigTags = new HashMap<String, Map<String, String>>();
+ Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
expect(managementController.getClusters()).andReturn(clusters).anyTimes();
@@ -179,7 +222,7 @@ public class HostStackVersionResourceProviderTest {
expect(managementController.getActionManager()).andReturn(actionManager).anyTimes();
expect(managementController.getJdkResourceUrl()).andReturn("/JdkResourceUrl").anyTimes();
expect(managementController.getPackagesForServiceHost(anyObject(ServiceInfo.class),
- (Map<String, String>) anyObject(List.class), anyObject(String.class))).andReturn(packages).anyTimes();
+ anyObject(Map.class), anyObject(String.class))).andReturn(packages).anyTimes();
expect(resourceProviderFactory.getHostResourceProvider(anyObject(Set.class), anyObject(Map.class),
eq(managementController))).andReturn(csvResourceProvider).anyTimes();
@@ -224,9 +267,9 @@ public class HostStackVersionResourceProviderTest {
injector.injectMembers(provider);
// add the property map to a set for the request. add more maps for multiple creates
- Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
+ Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
- Map<String, Object> properties = new LinkedHashMap<String, Object>();
+ Map<String, Object> properties = new LinkedHashMap<>();
// add properties to the request map
properties.put(HostStackVersionResourceProvider.HOST_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
@@ -248,42 +291,24 @@ public class HostStackVersionResourceProviderTest {
@Test
public void testCreateResources_in_out_of_sync_state() throws Exception {
- Resource.Type type = Resource.Type.HostStackVersion;
-
- AmbariManagementController managementController = createMock(AmbariManagementController.class);
- Clusters clusters = createNiceMock(Clusters.class);
- Cluster cluster = createNiceMock(Cluster.class);
StackId stackId = new StackId("HDP", "2.0.1");
final Host host1 = createNiceMock("host1", Host.class);
expect(host1.getHostName()).andReturn("host1").anyTimes();
expect(host1.getOsFamily()).andReturn("redhat6").anyTimes();
replay(host1);
- Map<String, Host> hostsForCluster = new HashMap<String, Host>() {{
- put(host1.getHostName(), host1);
- }};
+ Map<String, Host> hostsForCluster = Collections.singletonMap(host1.getHostName(), host1);
ServiceComponentHost sch = createMock(ServiceComponentHost.class);
List<ServiceComponentHost> schs = Collections.singletonList(sch);
- RepositoryVersionEntity repoVersion = new RepositoryVersionEntity();
- repoVersion.setOperatingSystems(operatingSystemsJson);
-
ServiceOsSpecific.Package hivePackage = new ServiceOsSpecific.Package();
hivePackage.setName("hive");
List<ServiceOsSpecific.Package> packages = Collections.singletonList(hivePackage);
- ActionManager actionManager = createNiceMock(ActionManager.class);
-
- RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
- ResourceProviderFactory resourceProviderFactory = createNiceMock(ResourceProviderFactory.class);
- ResourceProvider csvResourceProvider = createNiceMock(ClusterStackVersionResourceProvider.class);
-
- HostVersionEntity hostVersionEntityMock = createNiceMock(HostVersionEntity.class);
-
AbstractControllerResourceProvider.init(resourceProviderFactory);
- Map<String, Map<String, String>> hostConfigTags = new HashMap<String, Map<String, String>>();
+ Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
expect(managementController.getClusters()).andReturn(clusters).anyTimes();
@@ -291,7 +316,7 @@ public class HostStackVersionResourceProviderTest {
expect(managementController.getActionManager()).andReturn(actionManager).anyTimes();
expect(managementController.getJdkResourceUrl()).andReturn("/JdkResourceUrl").anyTimes();
expect(managementController.getPackagesForServiceHost(anyObject(ServiceInfo.class),
- (Map<String, String>) anyObject(List.class), anyObject(String.class))).andReturn(packages).anyTimes();
+ anyObject(Map.class), anyObject(String.class))).andReturn(packages).anyTimes();
expect(resourceProviderFactory.getHostResourceProvider(anyObject(Set.class), anyObject(Map.class),
eq(managementController))).andReturn(csvResourceProvider).anyTimes();
@@ -336,9 +361,9 @@ public class HostStackVersionResourceProviderTest {
injector.injectMembers(provider);
// add the property map to a set for the request. add more maps for multiple creates
- Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
+ Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
- Map<String, Object> properties = new LinkedHashMap<String, Object>();
+ Map<String, Object> properties = new LinkedHashMap<>();
// add properties to the request map
properties.put(HostStackVersionResourceProvider.HOST_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");