You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by nc...@apache.org on 2016/10/19 16:58:15 UTC
[05/36] ambari git commit: AMBARI-18495 - Remove Unnecessary Locks
Inside Of Cluster Business Object Implementations (jonathanhurley)
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
index 6318545..7b119f2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
@@ -28,8 +28,6 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.persistence.RollbackException;
@@ -103,18 +101,12 @@ public class ClustersImpl implements Clusters {
private static final Logger LOG = LoggerFactory.getLogger(
ClustersImpl.class);
- private ConcurrentHashMap<String, Cluster> clusters;
- private ConcurrentHashMap<Long, Cluster> clustersById;
- private ConcurrentHashMap<String, Host> hosts;
- private ConcurrentHashMap<Long, Host> hostsById;
- private ConcurrentHashMap<String, Set<Cluster>> hostClusterMap;
- private ConcurrentHashMap<String, Set<Host>> clusterHostMap;
-
- private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
- private final Lock r = rwl.readLock();
- private final Lock w = rwl.writeLock();
-
- private volatile boolean clustersLoaded = false;
+ private final ConcurrentHashMap<String, Cluster> clusters = new ConcurrentHashMap<String, Cluster>();
+ private final ConcurrentHashMap<Long, Cluster> clustersById = new ConcurrentHashMap<Long, Cluster>();
+ private final ConcurrentHashMap<String, Host> hosts = new ConcurrentHashMap<String, Host>();
+ private final ConcurrentHashMap<Long, Host> hostsById = new ConcurrentHashMap<Long, Host>();
+ private final ConcurrentHashMap<String, Set<Cluster>> hostClusterMap = new ConcurrentHashMap<String, Set<Cluster>>();
+ private final ConcurrentHashMap<String, Set<Host>> clusterHostMap = new ConcurrentHashMap<String, Set<Host>>();
@Inject
private ClusterDAO clusterDAO;
@@ -168,33 +160,36 @@ public class ClustersImpl implements Clusters {
private AmbariEventPublisher eventPublisher;
@Inject
- public ClustersImpl() {
- clusters = new ConcurrentHashMap<String, Cluster>();
- clustersById = new ConcurrentHashMap<Long, Cluster>();
- hosts = new ConcurrentHashMap<String, Host>();
- hostsById = new ConcurrentHashMap<Long, Host>();
- hostClusterMap = new ConcurrentHashMap<String, Set<Cluster>>();
- clusterHostMap = new ConcurrentHashMap<String, Set<Host>>();
-
- LOG.info("Initializing the ClustersImpl");
- }
+ public ClustersImpl(ClusterDAO clusterDAO, ClusterFactory clusterFactory, HostDAO hostDAO,
+ HostFactory hostFactory) {
- private void checkLoaded() {
- if (!clustersLoaded) {
- w.lock();
- try {
- if (!clustersLoaded) {
- loadClustersAndHosts();
- }
- clustersLoaded = true;
- } finally {
- w.unlock();
- }
- }
+ this.clusterDAO = clusterDAO;
+ this.clusterFactory = clusterFactory;
+ this.hostDAO = hostDAO;
+ this.hostFactory = hostFactory;
}
+ /**
+ * Initializes all of the in-memory state collections that this class
+ * unfortunately uses. It's annotated with {@link Inject} as a way to define a
+ * very simple lifecycle with Guice where the constructor is instantiated
+ * (allowing injected members) followed by this method which initializes the
+ * state of the instance.
+ * <p/>
+ * Because some of these stateful initializations may actually reference this
+ * {@link Clusters} instance, we must do this after the object has been
+ * instantiated and injected.
+ */
+ @Inject
@Transactional
- void loadClustersAndHosts() {
+ private void loadClustersAndHosts() {
+ List<HostEntity> hostEntities = hostDAO.findAll();
+ for (HostEntity hostEntity : hostEntities) {
+ Host host = hostFactory.create(hostEntity, true);
+ hosts.put(hostEntity.getHostName(), host);
+ hostsById.put(hostEntity.getHostId(), host);
+ }
+
for (ClusterEntity clusterEntity : clusterDAO.findAll()) {
Cluster currentCluster = clusterFactory.create(clusterEntity);
clusters.put(clusterEntity.getClusterName(), currentCluster);
@@ -202,13 +197,11 @@ public class ClustersImpl implements Clusters {
clusterHostMap.put(currentCluster.getClusterName(), Collections.newSetFromMap(new ConcurrentHashMap<Host, Boolean>()));
}
- for (HostEntity hostEntity : hostDAO.findAll()) {
- Host host = hostFactory.create(hostEntity, true);
- hosts.put(hostEntity.getHostName(), host);
- hostsById.put(hostEntity.getHostId(), host);
+ for (HostEntity hostEntity : hostEntities) {
Set<Cluster> cSet = Collections.newSetFromMap(new ConcurrentHashMap<Cluster, Boolean>());
hostClusterMap.put(hostEntity.getHostName(), cSet);
+ Host host = hosts.get(hostEntity.getHostName());
for (ClusterEntity clusterEntity : hostEntity.getClusterEntities()) {
clusterHostMap.get(clusterEntity.getClusterName()).add(host);
cSet.add(clusters.get(clusterEntity.getClusterName()));
@@ -225,66 +218,56 @@ public class ClustersImpl implements Clusters {
@Override
public void addCluster(String clusterName, StackId stackId, SecurityType securityType)
throws AmbariException {
- checkLoaded();
-
Cluster cluster = null;
- w.lock();
- try {
- if (clusters.containsKey(clusterName)) {
- throw new DuplicateResourceException("Attempted to create a Cluster which already exists"
- + ", clusterName=" + clusterName);
- }
-
- // create an admin resource to represent this cluster
- ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
- if (resourceTypeEntity == null) {
- resourceTypeEntity = new ResourceTypeEntity();
- resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
- resourceTypeEntity.setName(ResourceType.CLUSTER.name());
- resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
- }
+ if (clusters.containsKey(clusterName)) {
+ throw new DuplicateResourceException(
+ "Attempted to create a Cluster which already exists" + ", clusterName=" + clusterName);
+ }
- ResourceEntity resourceEntity = new ResourceEntity();
- resourceEntity.setResourceType(resourceTypeEntity);
+ // create an admin resource to represent this cluster
+ ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
+ if (resourceTypeEntity == null) {
+ resourceTypeEntity = new ResourceTypeEntity();
+ resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
+ resourceTypeEntity.setName(ResourceType.CLUSTER.name());
+ resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
+ }
- StackEntity stackEntity = stackDAO.find(stackId.getStackName(),
- stackId.getStackVersion());
+ ResourceEntity resourceEntity = new ResourceEntity();
+ resourceEntity.setResourceType(resourceTypeEntity);
- // retrieve new cluster id
- // add cluster id -> cluster mapping into clustersById
- ClusterEntity clusterEntity = new ClusterEntity();
- clusterEntity.setClusterName(clusterName);
- clusterEntity.setDesiredStack(stackEntity);
- clusterEntity.setResource(resourceEntity);
- if (securityType != null) {
- clusterEntity.setSecurityType(securityType);
- }
+ StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
- try {
- clusterDAO.create(clusterEntity);
- clusterEntity = clusterDAO.merge(clusterEntity);
- } catch (RollbackException e) {
- LOG.warn("Unable to create cluster " + clusterName, e);
- throw new AmbariException("Unable to create cluster " + clusterName, e);
- }
+ // retrieve new cluster id
+ // add cluster id -> cluster mapping into clustersById
+ ClusterEntity clusterEntity = new ClusterEntity();
+ clusterEntity.setClusterName(clusterName);
+ clusterEntity.setDesiredStack(stackEntity);
+ clusterEntity.setResource(resourceEntity);
+ if (securityType != null) {
+ clusterEntity.setSecurityType(securityType);
+ }
- cluster = clusterFactory.create(clusterEntity);
- clusters.put(clusterName, cluster);
- clustersById.put(cluster.getClusterId(), cluster);
- clusterHostMap.put(clusterName, new HashSet<Host>());
- } finally {
- w.unlock();
+ try {
+ clusterDAO.create(clusterEntity);
+ } catch (RollbackException e) {
+ LOG.warn("Unable to create cluster " + clusterName, e);
+ throw new AmbariException("Unable to create cluster " + clusterName, e);
}
+ cluster = clusterFactory.create(clusterEntity);
+ clusters.put(clusterName, cluster);
+ clustersById.put(cluster.getClusterId(), cluster);
+ clusterHostMap.put(clusterName,
+ Collections.newSetFromMap(new ConcurrentHashMap<Host, Boolean>()));
+
cluster.setCurrentStackVersion(stackId);
}
@Override
public Cluster getCluster(String clusterName)
throws AmbariException {
- checkLoaded();
-
Cluster cluster = null;
if (clusterName != null) {
cluster = clusters.get(clusterName);
@@ -299,8 +282,6 @@ public class ClustersImpl implements Clusters {
@Override
public Cluster getCluster(Long clusterId)
throws AmbariException {
- checkLoaded();
-
Cluster cluster = null;
if (clusterId != null) {
cluster = clustersById.get(clusterId);
@@ -314,8 +295,6 @@ public class ClustersImpl implements Clusters {
@Override
public Cluster getClusterById(long id) throws AmbariException {
- checkLoaded();
-
Cluster cluster = clustersById.get(id);
if (null == cluster) {
throw new ClusterNotFoundException("clusterID=" + id);
@@ -327,6 +306,7 @@ public class ClustersImpl implements Clusters {
@Override
public void setCurrentStackVersion(String clusterName, StackId stackId)
throws AmbariException{
+
if(stackId == null || clusterName == null || clusterName.isEmpty()){
LOG.warn("Unable to set version for cluster " + clusterName);
throw new AmbariException("Unable to set"
@@ -334,19 +314,9 @@ public class ClustersImpl implements Clusters {
+ " for cluster " + clusterName);
}
- checkLoaded();
-
- Cluster cluster = null;
-
- r.lock();
- try {
- if (!clusters.containsKey(clusterName)) {
- throw new ClusterNotFoundException(clusterName);
- }
-
- cluster = clusters.get(clusterName);
- } finally {
- r.unlock();
+ Cluster cluster = clusters.get(clusterName);
+ if (null == cluster) {
+ throw new ClusterNotFoundException(clusterName);
}
cluster.setCurrentStackVersion(stackId);
@@ -354,15 +324,12 @@ public class ClustersImpl implements Clusters {
@Override
public List<Host> getHosts() {
- checkLoaded();
-
return new ArrayList<Host>(hosts.values());
}
@Override
public Set<Cluster> getClustersForHost(String hostname)
throws AmbariException {
- checkLoaded();
Set<Cluster> clusters = hostClusterMap.get(hostname);
if(clusters == null){
throw new HostNotFoundException(hostname);
@@ -378,19 +345,16 @@ public class ClustersImpl implements Clusters {
@Override
public Host getHost(String hostname) throws AmbariException {
- checkLoaded();
-
- if (!hosts.containsKey(hostname)) {
+ Host host = hosts.get(hostname);
+ if (null == host) {
throw new HostNotFoundException(hostname);
}
- return hosts.get(hostname);
+ return host;
}
@Override
public boolean hostExists(String hostname){
- checkLoaded();
-
return hosts.containsKey(hostname);
}
@@ -399,8 +363,6 @@ public class ClustersImpl implements Clusters {
*/
@Override
public boolean isHostMappedToCluster(String clusterName, String hostName) {
- checkLoaded();
-
Set<Cluster> clusters = hostClusterMap.get(hostName);
for (Cluster cluster : clusters) {
if (clusterName.equals(cluster.getClusterName())) {
@@ -413,8 +375,6 @@ public class ClustersImpl implements Clusters {
@Override
public Host getHostById(Long hostId) throws AmbariException {
- checkLoaded();
-
if (!hostsById.containsKey(hostId)) {
throw new HostNotFoundException("Host Id = " + hostId);
}
@@ -442,40 +402,32 @@ public class ClustersImpl implements Clusters {
*/
@Override
public void addHost(String hostname) throws AmbariException {
- checkLoaded();
-
if (hosts.containsKey(hostname)) {
throw new AmbariException(MessageFormat.format("Duplicate entry for Host {0}", hostname));
}
- w.lock();
+ HostEntity hostEntity = new HostEntity();
+ hostEntity.setHostName(hostname);
+ hostEntity.setClusterEntities(new ArrayList<ClusterEntity>());
- try {
- HostEntity hostEntity = new HostEntity();
- hostEntity.setHostName(hostname);
- hostEntity.setClusterEntities(new ArrayList<ClusterEntity>());
-
- //not stored to DB
- Host host = hostFactory.create(hostEntity, false);
- host.setAgentVersion(new AgentVersion(""));
- List<DiskInfo> emptyDiskList = new ArrayList<DiskInfo>();
- host.setDisksInfo(emptyDiskList);
- host.setHealthStatus(new HostHealthStatus(HealthStatus.UNKNOWN, ""));
- host.setHostAttributes(new HashMap<String, String>());
- host.setState(HostState.INIT);
+ // not stored to DB
+ Host host = hostFactory.create(hostEntity, false);
+ host.setAgentVersion(new AgentVersion(""));
+ List<DiskInfo> emptyDiskList = new ArrayList<DiskInfo>();
+ host.setDisksInfo(emptyDiskList);
+ host.setHealthStatus(new HostHealthStatus(HealthStatus.UNKNOWN, ""));
+ host.setHostAttributes(new HashMap<String, String>());
+ host.setState(HostState.INIT);
- // the hosts by ID map is updated separately since the host has not yet
- // been persisted yet - the below event is what causes the persist
- hosts.put(hostname, host);
+ // the hosts by ID map is updated separately since the host has not yet
+ // been persisted yet - the below event is what causes the persist
+ hosts.put(hostname, host);
- hostClusterMap.put(hostname, Collections.newSetFromMap(new ConcurrentHashMap<Cluster, Boolean>()));
+ hostClusterMap.put(hostname,
+ Collections.newSetFromMap(new ConcurrentHashMap<Cluster, Boolean>()));
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding a host to Clusters"
- + ", hostname=" + hostname);
- }
- } finally {
- w.unlock();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Adding a host to Clusters" + ", hostname=" + hostname);
}
// publish the event
@@ -494,42 +446,37 @@ public class ClustersImpl implements Clusters {
public void updateHostWithClusterAndAttributes(
Map<String, Set<String>> hostClusters,
Map<String, Map<String, String>> hostAttributes) throws AmbariException {
- checkLoaded();
- w.lock();
- try {
- if (hostClusters != null) {
- Map<String, Host> hostMap = getHostsMap(hostClusters.keySet());
- Set<String> clusterNames = new HashSet<String>();
- for (Set<String> cSet : hostClusters.values()) {
- clusterNames.addAll(cSet);
- }
+ if (null == hostClusters || hostClusters.isEmpty()) {
+ return;
+ }
- for (String hostname : hostClusters.keySet()) {
- Host host = hostMap.get(hostname);
- Map<String, String> attributes = hostAttributes.get(hostname);
- if (attributes != null && !attributes.isEmpty()){
- host.setHostAttributes(attributes);
- }
+ Map<String, Host> hostMap = getHostsMap(hostClusters.keySet());
+ Set<String> clusterNames = new HashSet<String>();
+ for (Set<String> cSet : hostClusters.values()) {
+ clusterNames.addAll(cSet);
+ }
- host.refresh();
+ for (String hostname : hostClusters.keySet()) {
+ Host host = hostMap.get(hostname);
+ Map<String, String> attributes = hostAttributes.get(hostname);
+ if (attributes != null && !attributes.isEmpty()) {
+ host.setHostAttributes(attributes);
+ }
- Set<String> hostClusterNames = hostClusters.get(hostname);
- for (String clusterName : hostClusterNames) {
- if (clusterName != null && !clusterName.isEmpty()) {
- mapHostToCluster(hostname, clusterName);
- }
- }
+ host.refresh();
+
+ Set<String> hostClusterNames = hostClusters.get(hostname);
+ for (String clusterName : hostClusterNames) {
+ if (clusterName != null && !clusterName.isEmpty()) {
+ mapHostToCluster(hostname, clusterName);
}
}
- } finally {
- w.unlock();
}
}
private Map<String, Host> getHostsMap(Collection<String> hostSet) throws
HostNotFoundException {
- checkLoaded();
Map<String, Host> hostMap = new HashMap<String, Host>();
Host host = null;
@@ -557,15 +504,9 @@ public class ClustersImpl implements Clusters {
*/
@Override
public void mapHostsToCluster(Set<String> hostnames, String clusterName) throws AmbariException {
- checkLoaded();
- w.lock();
- try {
- ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStateCurrent(clusterName);
- for (String hostname : hostnames) {
- mapHostToCluster(hostname, clusterName, clusterVersionEntity);
- }
- } finally {
- w.unlock();
+ ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStateCurrent(clusterName);
+ for (String hostname : hostnames) {
+ mapHostToCluster(hostname, clusterName, clusterVersionEntity);
}
}
@@ -582,22 +523,15 @@ public class ClustersImpl implements Clusters {
Host host = null;
Cluster cluster = null;
- checkLoaded();
+ host = getHost(hostname);
+ cluster = getCluster(clusterName);
- r.lock();
- try {
- host = getHost(hostname);
- cluster = getCluster(clusterName);
-
- // check to ensure there are no duplicates
- for (Cluster c : hostClusterMap.get(hostname)) {
- if (c.getClusterName().equals(clusterName)) {
- throw new DuplicateResourceException("Attempted to create a host which already exists: clusterName=" +
- clusterName + ", hostName=" + hostname);
- }
+ // check to ensure there are no duplicates
+ for (Cluster c : hostClusterMap.get(hostname)) {
+ if (c.getClusterName().equals(clusterName)) {
+ throw new DuplicateResourceException("Attempted to create a host which already exists: clusterName=" +
+ clusterName + ", hostName=" + hostname);
}
- } finally {
- r.unlock();
}
if (!isOsSupportedByClusterStack(cluster, host)) {
@@ -615,14 +549,9 @@ public class ClustersImpl implements Clusters {
clusterId);
}
- w.lock();
- try {
- mapHostClusterEntities(hostname, clusterId);
- hostClusterMap.get(hostname).add(cluster);
- clusterHostMap.get(clusterName).add(host);
- } finally {
- w.unlock();
- }
+ mapHostClusterEntities(hostname, clusterId);
+ hostClusterMap.get(hostname).add(cluster);
+ clusterHostMap.get(clusterName).add(host);
cluster.refresh();
host.refresh();
@@ -638,8 +567,6 @@ public class ClustersImpl implements Clusters {
@Override
public void mapHostToCluster(String hostname, String clusterName)
throws AmbariException {
- checkLoaded();
-
ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStateCurrent(clusterName);
mapHostToCluster(hostname, clusterName, clusterVersionEntity);
}
@@ -662,169 +589,121 @@ public class ClustersImpl implements Clusters {
@Override
public Map<String, Cluster> getClusters() {
- checkLoaded();
- r.lock();
- try {
- return Collections.unmodifiableMap(clusters);
- } finally {
- r.unlock();
- }
+ return Collections.unmodifiableMap(clusters);
}
@Override
public void updateClusterName(String oldName, String newName) {
- w.lock();
- try {
- clusters.put(newName, clusters.remove(oldName));
- clusterHostMap.put(newName, clusterHostMap.remove(oldName));
- } finally {
- w.unlock();
- }
+ clusters.put(newName, clusters.remove(oldName));
+ clusterHostMap.put(newName, clusterHostMap.remove(oldName));
}
@Override
public void debugDump(StringBuilder sb) {
- r.lock();
- try {
- sb.append("Clusters=[ ");
- boolean first = true;
- for (Cluster c : clusters.values()) {
- if (!first) {
- sb.append(" , ");
- }
- first = false;
- sb.append("\n ");
- c.debugDump(sb);
- sb.append(" ");
+ sb.append("Clusters=[ ");
+ boolean first = true;
+ for (Cluster c : clusters.values()) {
+ if (!first) {
+ sb.append(" , ");
}
- sb.append(" ]");
- } finally {
- r.unlock();
+ first = false;
+ sb.append("\n ");
+ c.debugDump(sb);
+ sb.append(" ");
}
+ sb.append(" ]");
}
@Override
public Map<String, Host> getHostsForCluster(String clusterName)
throws AmbariException {
- checkLoaded();
- r.lock();
-
- try {
- Map<String, Host> hosts = new HashMap<String, Host>();
-
- for (Host h : clusterHostMap.get(clusterName)) {
- hosts.put(h.getHostName(), h);
- }
-
- return hosts;
- } finally {
- r.unlock();
+ Map<String, Host> hosts = new HashMap<String, Host>();
+ for (Host h : clusterHostMap.get(clusterName)) {
+ hosts.put(h.getHostName(), h);
}
+
+ return hosts;
}
@Override
public Map<Long, Host> getHostIdsForCluster(String clusterName)
throws AmbariException {
+ Map<Long, Host> hosts = new HashMap<Long, Host>();
- checkLoaded();
- r.lock();
-
- try {
- Map<Long, Host> hosts = new HashMap<Long, Host>();
-
- for (Host h : clusterHostMap.get(clusterName)) {
- HostEntity hostEntity = hostDAO.findByName(h.getHostName());
- hosts.put(hostEntity.getHostId(), h);
- }
-
- return hosts;
- } finally {
- r.unlock();
+ for (Host h : clusterHostMap.get(clusterName)) {
+ HostEntity hostEntity = hostDAO.findByName(h.getHostName());
+ hosts.put(hostEntity.getHostId(), h);
}
+
+ return hosts;
}
@Override
public void deleteCluster(String clusterName)
throws AmbariException {
- checkLoaded();
- w.lock();
- try {
- Cluster cluster = getCluster(clusterName);
- if (!cluster.canBeRemoved()) {
- throw new AmbariException("Could not delete cluster"
- + ", clusterName=" + clusterName);
- }
- LOG.info("Deleting cluster " + cluster.getClusterName());
- cluster.delete();
+ Cluster cluster = getCluster(clusterName);
+ if (!cluster.canBeRemoved()) {
+ throw new AmbariException("Could not delete cluster" + ", clusterName=" + clusterName);
+ }
- //clear maps
- for (Set<Cluster> clusterSet : hostClusterMap.values()) {
- clusterSet.remove(cluster);
- }
- clusterHostMap.remove(cluster.getClusterName());
+ LOG.info("Deleting cluster " + cluster.getClusterName());
+ cluster.delete();
- Collection<ClusterVersionEntity> clusterVersions = cluster.getAllClusterVersions();
- for (ClusterVersionEntity clusterVersion : clusterVersions) {
- clusterVersionDAO.remove(clusterVersion);
- }
+ // clear maps
+ for (Set<Cluster> clusterSet : hostClusterMap.values()) {
+ clusterSet.remove(cluster);
+ }
+ clusterHostMap.remove(cluster.getClusterName());
- clusters.remove(clusterName);
- } finally {
- w.unlock();
+ Collection<ClusterVersionEntity> clusterVersions = cluster.getAllClusterVersions();
+ for (ClusterVersionEntity clusterVersion : clusterVersions) {
+ clusterVersionDAO.remove(clusterVersion);
}
+
+ clusters.remove(clusterName);
}
@Override
public void unmapHostFromCluster(String hostname, String clusterName) throws AmbariException {
final Cluster cluster = getCluster(clusterName);
- unmapHostFromClusters(hostname, Sets.newHashSet(cluster));
+ Host host = getHost(hostname);
+
+ unmapHostFromClusters(host, Sets.newHashSet(cluster));
+
+ cluster.refresh();
+ host.refresh();
}
@Transactional
- void unmapHostFromClusters(String hostname, Set<Cluster> clusters) throws AmbariException {
- Host host = null;
+ void unmapHostFromClusters(Host host, Set<Cluster> clusters) throws AmbariException {
HostEntity hostEntity = null;
- checkLoaded();
if (clusters.isEmpty()) {
return;
}
- r.lock();
- try {
- host = getHost(hostname);
- hostEntity = hostDAO.findByName(hostname);
- } finally {
- r.unlock();
- }
-
- w.lock();
- try {
- for (Cluster cluster : clusters) {
- long clusterId = cluster.getClusterId();
- if (LOG.isDebugEnabled()) {
- LOG.debug("Unmapping host {} from cluster {} (id={})", hostname,
- cluster.getClusterName(), clusterId);
- }
-
- unmapHostClusterEntities(hostname, cluster.getClusterId());
-
- hostClusterMap.get(hostname).remove(cluster);
- clusterHostMap.get(cluster.getClusterName()).remove(host);
+ String hostname = host.getHostName();
+ hostEntity = hostDAO.findByName(hostname);
- host.refresh();
- cluster.refresh();
+ for (Cluster cluster : clusters) {
+ long clusterId = cluster.getClusterId();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Unmapping host {} from cluster {} (id={})", hostname, cluster.getClusterName(),
+ clusterId);
}
- deleteConfigGroupHostMapping(hostEntity.getHostId());
+ unmapHostClusterEntities(hostname, cluster.getClusterId());
- // Remove mapping of principals to the unmapped host
- kerberosPrincipalHostDAO.removeByHost(hostEntity.getHostId());
- } finally {
- w.unlock();
+ hostClusterMap.get(hostname).remove(cluster);
+ clusterHostMap.get(cluster.getClusterName()).remove(host);
}
+
+ deleteConfigGroupHostMapping(hostEntity.getHostId());
+
+ // Remove mapping of principals to the unmapped host
+ kerberosPrincipalHostDAO.removeByHost(hostEntity.getHostId());
}
@Transactional
@@ -890,88 +769,81 @@ public class ClustersImpl implements Clusters {
*/
@Transactional
void deleteHostEntityRelationships(String hostname) throws AmbariException {
- checkLoaded();
-
if (!hosts.containsKey(hostname)) {
throw new HostNotFoundException("Could not find host " + hostname);
}
- w.lock();
-
- try {
- HostEntity entity = hostDAO.findByName(hostname);
-
- if (entity == null) {
- return;
- }
- // Remove from all clusters in the cluster_host_mapping table.
- // This will also remove from kerberos_principal_hosts, hostconfigmapping, and configgrouphostmapping
- Set<Cluster> clusters = hostClusterMap.get(hostname);
- Set<Long> clusterIds = Sets.newHashSet();
- for (Cluster cluster: clusters) {
- clusterIds.add(cluster.getClusterId());
- }
-
+ HostEntity entity = hostDAO.findByName(hostname);
+ if (entity == null) {
+ return;
+ }
+ // Remove from all clusters in the cluster_host_mapping table.
+ // This will also remove from kerberos_principal_hosts, hostconfigmapping,
+ // and configgrouphostmapping
+ Set<Cluster> clusters = hostClusterMap.get(hostname);
+ Set<Long> clusterIds = Sets.newHashSet();
+ for (Cluster cluster : clusters) {
+ clusterIds.add(cluster.getClusterId());
+ }
- unmapHostFromClusters(hostname, clusters);
- hostDAO.refresh(entity);
+ Host host = hosts.get(hostname);
+ unmapHostFromClusters(host, clusters);
+ hostDAO.refresh(entity);
- hostVersionDAO.removeByHostName(hostname);
+ hostVersionDAO.removeByHostName(hostname);
- // Remove blueprint tasks before hostRoleCommands
- // TopologyLogicalTask owns the OneToOne relationship but Cascade is on HostRoleCommandEntity
- if (entity.getHostRoleCommandEntities() != null) {
- for (HostRoleCommandEntity hrcEntity : entity.getHostRoleCommandEntities()) {
- TopologyLogicalTaskEntity topologyLogicalTaskEnity = hrcEntity.getTopologyLogicalTaskEntity();
- if (topologyLogicalTaskEnity != null) {
- topologyLogicalTaskDAO.remove(topologyLogicalTaskEnity);
- hrcEntity.setTopologyLogicalTaskEntity(null);
- }
+ // Remove blueprint tasks before hostRoleCommands
+ // TopologyLogicalTask owns the OneToOne relationship but Cascade is on
+ // HostRoleCommandEntity
+ if (entity.getHostRoleCommandEntities() != null) {
+ for (HostRoleCommandEntity hrcEntity : entity.getHostRoleCommandEntities()) {
+ TopologyLogicalTaskEntity topologyLogicalTaskEnity = hrcEntity.getTopologyLogicalTaskEntity();
+ if (topologyLogicalTaskEnity != null) {
+ topologyLogicalTaskDAO.remove(topologyLogicalTaskEnity);
+ hrcEntity.setTopologyLogicalTaskEntity(null);
}
}
- for (Long clusterId: clusterIds) {
- for (TopologyRequestEntity topologyRequestEntity: topologyRequestDAO.findByClusterId(clusterId)) {
- TopologyLogicalRequestEntity topologyLogicalRequestEntity = topologyRequestEntity.getTopologyLogicalRequestEntity();
-
- for (TopologyHostRequestEntity topologyHostRequestEntity: topologyLogicalRequestEntity.getTopologyHostRequestEntities()) {
- if (hostname.equals(topologyHostRequestEntity.getHostName())) {
- topologyHostRequestDAO.remove(topologyHostRequestEntity);
- }
+ }
+
+ for (Long clusterId : clusterIds) {
+ for (TopologyRequestEntity topologyRequestEntity : topologyRequestDAO.findByClusterId(
+ clusterId)) {
+ TopologyLogicalRequestEntity topologyLogicalRequestEntity = topologyRequestEntity.getTopologyLogicalRequestEntity();
+
+ for (TopologyHostRequestEntity topologyHostRequestEntity : topologyLogicalRequestEntity.getTopologyHostRequestEntities()) {
+ if (hostname.equals(topologyHostRequestEntity.getHostName())) {
+ topologyHostRequestDAO.remove(topologyHostRequestEntity);
}
}
}
+ }
- entity.setHostRoleCommandEntities(null);
- hostRoleCommandDAO.removeByHostId(entity.getHostId());
+ entity.setHostRoleCommandEntities(null);
+ hostRoleCommandDAO.removeByHostId(entity.getHostId());
- entity.setHostStateEntity(null);
- hostStateDAO.removeByHostId(entity.getHostId());
- hostConfigMappingDAO.removeByHostId(entity.getHostId());
- serviceConfigDAO.removeHostFromServiceConfigs(entity.getHostId());
- requestOperationLevelDAO.removeByHostId(entity.getHostId());
- topologyHostInfoDAO.removeByHost(entity);
+ entity.setHostStateEntity(null);
+ hostStateDAO.removeByHostId(entity.getHostId());
+ hostConfigMappingDAO.removeByHostId(entity.getHostId());
+ serviceConfigDAO.removeHostFromServiceConfigs(entity.getHostId());
+ requestOperationLevelDAO.removeByHostId(entity.getHostId());
+ topologyHostInfoDAO.removeByHost(entity);
- // Remove from dictionaries
- hosts.remove(hostname);
- hostsById.remove(entity.getHostId());
+ // Remove from dictionaries
+ hosts.remove(hostname);
+ hostsById.remove(entity.getHostId());
- hostDAO.remove(entity);
+ hostDAO.remove(entity);
- // Note, if the host is still heartbeating, then new records will be re-inserted
- // into the hosts and hoststate tables
- } catch (Exception e) {
- throw new AmbariException("Could not remove host", e);
- } finally {
- w.unlock();
- }
+ // Note, if the host is still heartbeating, then new records will be
+ // re-inserted
+ // into the hosts and hoststate tables
}
@Override
public boolean checkPermission(String clusterName, boolean readOnly) {
-
Cluster cluster = findCluster(clusterName);
return (cluster == null && readOnly) || checkPermission(cluster, readOnly);
@@ -999,19 +871,14 @@ public class ClustersImpl implements Clusters {
*/
@Override
public int getClusterSize(String clusterName) {
- checkLoaded();
- r.lock();
-
int hostCount = 0;
- if (clusterHostMap.containsKey(clusterName) && clusterHostMap.get(clusterName) != null) {
+ Set<Host> hosts = clusterHostMap.get(clusterName);
+ if (null != hosts) {
hostCount = clusterHostMap.get(clusterName).size();
}
- r.unlock();
-
return hostCount;
-
}
// ----- helper methods ---------------------------------------------------
@@ -1064,4 +931,16 @@ public class ClustersImpl implements Clusters {
// TODO : should we log this?
return false;
}
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void invalidate(Cluster cluster) {
+ ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
+ Cluster currentCluster = clusterFactory.create(clusterEntity);
+ clusters.put(clusterEntity.getClusterName(), currentCluster);
+ clustersById.put(currentCluster.getClusterId(), currentCluster);
+ clusterHostMap.put(currentCluster.getClusterName(), Collections.newSetFromMap(new ConcurrentHashMap<Host, Boolean>()));
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java
index 877e84d..17f1447 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java
@@ -17,23 +17,25 @@
*/
package org.apache.ambari.server.utils;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.Callable;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
import org.eclipse.persistence.exceptions.DatabaseException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.Callable;
-
/**
* Provides utility methods to support operations retry
* TODO injection as Guice singleon, static for now to avoid major modifications
*/
public class RetryHelper {
protected final static Logger LOG = LoggerFactory.getLogger(RetryHelper.class);
+ private static Clusters s_clusters;
private static ThreadLocal<Set<Cluster>> affectedClusters = new ThreadLocal<Set<Cluster>>(){
@Override
@@ -44,7 +46,8 @@ public class RetryHelper {
private static int operationsRetryAttempts = 0;
- public static void init(int operationsRetryAttempts) {
+ public static void init(Clusters clusters, int operationsRetryAttempts) {
+ s_clusters = clusters;
RetryHelper.operationsRetryAttempts = operationsRetryAttempts;
}
@@ -82,7 +85,7 @@ public class RetryHelper {
public static void invalidateAffectedClusters() {
for (Cluster cluster : affectedClusters.get()) {
- cluster.invalidateData();
+ s_clusters.invalidate(cluster);
}
}
@@ -90,7 +93,6 @@ public class RetryHelper {
RetryHelper.clearAffectedClusters();
int retryAttempts = RetryHelper.getOperationsRetryAttempts();
do {
-
try {
return command.call();
} catch (Exception e) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
index dd741e9..37a6ae0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
@@ -18,10 +18,12 @@
package org.apache.ambari.server.agent;
+import static org.easymock.EasyMock.createNiceMock;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+import javax.persistence.EntityManager;
import javax.ws.rs.core.MediaType;
import org.apache.ambari.server.RandomPortJerseyTest;
@@ -35,6 +37,7 @@ import org.apache.ambari.server.agent.rest.AgentResource;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.HostDAO;
import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
import org.apache.ambari.server.security.SecurityHelper;
import org.apache.ambari.server.security.SecurityHelperImpl;
@@ -55,7 +58,6 @@ import org.apache.ambari.server.state.ServiceFactory;
import org.apache.ambari.server.state.ServiceImpl;
import org.apache.ambari.server.state.cluster.ClusterFactory;
import org.apache.ambari.server.state.cluster.ClusterImpl;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
import org.apache.ambari.server.state.configgroup.ConfigGroup;
import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
import org.apache.ambari.server.state.configgroup.ConfigGroupImpl;
@@ -80,7 +82,6 @@ import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.assistedinject.FactoryModuleBuilder;
-import com.google.inject.persist.jpa.AmbariJpaPersistModule;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
@@ -296,7 +297,6 @@ public class AgentResourceTest extends RandomPortJerseyTest {
// The test will fail anyway
}
requestStaticInjection(AgentResource.class);
- bind(Clusters.class).to(ClustersImpl.class);
os_family = mock(OsFamily.class);
actionManager = mock(ActionManager.class);
ambariMetaInfo = mock(AmbariMetaInfo.class);
@@ -311,10 +311,12 @@ public class AgentResourceTest extends RandomPortJerseyTest {
bind(AmbariMetaInfo.class).toInstance(ambariMetaInfo);
bind(DBAccessor.class).toInstance(mock(DBAccessor.class));
bind(HostRoleCommandDAO.class).toInstance(mock(HostRoleCommandDAO.class));
+ bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
+ bind(HostDAO.class).toInstance(createNiceMock(HostDAO.class));
+ bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
}
private void installDependencies() {
- install(new AmbariJpaPersistModule("ambari-javadb"));
install(new FactoryModuleBuilder().implement(
Cluster.class, ClusterImpl.class).build(ClusterFactory.class));
install(new FactoryModuleBuilder().implement(
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
index 299002b..6e3f2e0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
@@ -59,7 +59,6 @@ import org.apache.ambari.server.actionmanager.StageFactory;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.audit.AuditLogger;
import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.dao.HostDAO;
@@ -594,7 +593,7 @@ public class HeartbeatProcessorTest {
@Test
@SuppressWarnings("unchecked")
public void testCommandReportOnHeartbeatUpdatedState()
- throws AmbariException, InvalidStateTransitionException {
+ throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -714,7 +713,7 @@ public class HeartbeatProcessorTest {
@Test
@SuppressWarnings("unchecked")
- public void testUpgradeSpecificHandling() throws AmbariException, InvalidStateTransitionException {
+ public void testUpgradeSpecificHandling() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -891,7 +890,7 @@ public class HeartbeatProcessorTest {
@Test
@SuppressWarnings("unchecked")
- public void testComponentUpgradeCompleteReport() throws AmbariException, InvalidStateTransitionException {
+ public void testComponentUpgradeCompleteReport() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -976,7 +975,7 @@ public class HeartbeatProcessorTest {
@Test
@SuppressWarnings("unchecked")
- public void testComponentUpgradeFailReport() throws AmbariException, InvalidStateTransitionException {
+ public void testComponentUpgradeFailReport() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -1097,7 +1096,7 @@ public class HeartbeatProcessorTest {
@Test
@SuppressWarnings("unchecked")
- public void testComponentUpgradeInProgressReport() throws AmbariException, InvalidStateTransitionException {
+ public void testComponentUpgradeInProgressReport() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
index 6fc6892..e813e66 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
@@ -24,6 +24,7 @@ import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOs;
import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyStackId;
import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HBASE;
+import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@@ -60,6 +61,7 @@ import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.RepositoryVersionState;
import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.cluster.ClustersImpl;
import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
@@ -135,7 +137,7 @@ public class HeartbeatTestHelper {
}
public Cluster getDummyCluster()
- throws AmbariException {
+ throws Exception {
Map<String, String> configProperties = new HashMap<String, String>() {{
put(RecoveryConfigHelper.RECOVERY_ENABLED_KEY, "true");
put(RecoveryConfigHelper.RECOVERY_TYPE_KEY, "AUTO_START");
@@ -154,7 +156,7 @@ public class HeartbeatTestHelper {
public Cluster getDummyCluster(String clusterName, String desiredStackId,
Map<String, String> configProperties, Set<String> hostNames)
- throws AmbariException {
+ throws Exception {
StackEntity stackEntity = stackDAO.find(HDP_22_STACK.getStackName(), HDP_22_STACK.getStackVersion());
org.junit.Assert.assertNotNull(stackEntity);
@@ -177,6 +179,14 @@ public class HeartbeatTestHelper {
StackId stackId = new StackId(desiredStackId);
+ // because this test method goes around the Clusters business object, we
+ // forcefully will refresh the internal state so that any tests which
+ * incorrectly use Clusters after calling this won't be affected
+ Clusters clusters = injector.getInstance(Clusters.class);
+ Method method = ClustersImpl.class.getDeclaredMethod("loadClustersAndHosts");
+ method.setAccessible(true);
+ method.invoke(clusters);
+
Cluster cluster = clusters.getCluster(clusterName);
cluster.setDesiredStackVersion(stackId);
@@ -209,6 +219,7 @@ public class HeartbeatTestHelper {
Assert.assertNotNull(hostEntity);
hostEntities.add(hostEntity);
}
+
clusterEntity.setHostEntities(hostEntities);
clusters.mapHostsToCluster(hostNames, clusterName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index aa7ef20..0f48cf6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@ -160,7 +160,7 @@ public class TestHeartbeatHandler {
}
@After
- public void teardown() throws AmbariException {
+ public void teardown() throws Exception {
injector.getInstance(PersistService.class).stop();
EasyMock.reset(auditLogger);
}
@@ -345,7 +345,7 @@ public class TestHeartbeatHandler {
@Test
- public void testRegistration() throws AmbariException,
+ public void testRegistration() throws Exception,
InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
replay(am);
@@ -376,7 +376,7 @@ public class TestHeartbeatHandler {
}
@Test
- public void testRegistrationRecoveryConfig() throws AmbariException,
+ public void testRegistrationRecoveryConfig() throws Exception,
InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
replay(am);
@@ -441,7 +441,7 @@ public class TestHeartbeatHandler {
//
@Test
public void testRegistrationRecoveryConfigMaintenanceMode()
- throws AmbariException, InvalidStateTransitionException {
+ throws Exception, InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
replay(am);
Clusters fsm = clusters;
@@ -495,7 +495,7 @@ public class TestHeartbeatHandler {
}
@Test
- public void testRegistrationAgentConfig() throws AmbariException,
+ public void testRegistrationAgentConfig() throws Exception,
InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
replay(am);
@@ -527,7 +527,7 @@ public class TestHeartbeatHandler {
}
@Test
- public void testRegistrationWithBadVersion() throws AmbariException,
+ public void testRegistrationWithBadVersion() throws Exception,
InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
@@ -570,7 +570,7 @@ public class TestHeartbeatHandler {
}
@Test
- public void testRegistrationPublicHostname() throws AmbariException, InvalidStateTransitionException {
+ public void testRegistrationPublicHostname() throws Exception, InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
replay(am);
Clusters fsm = clusters;
@@ -602,7 +602,7 @@ public class TestHeartbeatHandler {
@Test
- public void testInvalidOSRegistration() throws AmbariException,
+ public void testInvalidOSRegistration() throws Exception,
InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
replay(am);
@@ -630,7 +630,7 @@ public class TestHeartbeatHandler {
}
@Test
- public void testIncompatibleAgentRegistration() throws AmbariException,
+ public void testIncompatibleAgentRegistration() throws Exception,
InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
@@ -660,7 +660,7 @@ public class TestHeartbeatHandler {
@Test
public void testRegisterNewNode()
- throws AmbariException, InvalidStateTransitionException {
+ throws Exception, InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
replay(am);
Clusters fsm = clusters;
@@ -745,7 +745,7 @@ public class TestHeartbeatHandler {
}
@Test
- public void testStateCommandsAtRegistration() throws AmbariException, InvalidStateTransitionException {
+ public void testStateCommandsAtRegistration() throws Exception, InvalidStateTransitionException {
List<StatusCommand> dummyCmds = new ArrayList<StatusCommand>();
StatusCommand statusCmd1 = new StatusCommand();
statusCmd1.setClusterName(DummyCluster);
@@ -781,7 +781,7 @@ public class TestHeartbeatHandler {
@Test
@SuppressWarnings("unchecked")
- public void testTaskInProgressHandling() throws AmbariException, InvalidStateTransitionException {
+ public void testTaskInProgressHandling() throws Exception, InvalidStateTransitionException {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -839,7 +839,7 @@ public class TestHeartbeatHandler {
@Test
@SuppressWarnings("unchecked")
- public void testOPFailedEventForAbortedTask() throws AmbariException, InvalidStateTransitionException {
+ public void testOPFailedEventForAbortedTask() throws Exception, InvalidStateTransitionException {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -1281,7 +1281,7 @@ public class TestHeartbeatHandler {
@Test
@SuppressWarnings("unchecked")
- public void testIgnoreCustomActionReport() throws AmbariException, InvalidStateTransitionException {
+ public void testIgnoreCustomActionReport() throws Exception, InvalidStateTransitionException {
CommandReport cr1 = new CommandReport();
cr1.setActionId(StageUtils.getActionId(requestId, stageId));
cr1.setTaskId(1);
@@ -1343,7 +1343,7 @@ public class TestHeartbeatHandler {
}
@Test
- public void testComponents() throws AmbariException,
+ public void testComponents() throws Exception,
InvalidStateTransitionException {
ComponentsResponse expected = new ComponentsResponse();
StackId dummyStackId = new StackId(DummyStackId);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java
index c36f5fe..7e564a1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java
@@ -18,20 +18,27 @@
package org.apache.ambari.server.api.services;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
-import org.apache.ambari.server.api.services.serializers.ResultSerializer;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
+import static org.junit.Assert.assertEquals;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
-import static org.junit.Assert.assertEquals;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.ambari.server.api.resources.ResourceInstance;
+import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
+import org.apache.ambari.server.api.services.serializers.ResultSerializer;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.HostDAO;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.cluster.ClusterFactory;
+import org.apache.ambari.server.state.cluster.ClustersImpl;
+import org.apache.ambari.server.state.host.HostFactory;
+import org.easymock.EasyMock;
/**
* Unit tests for ClusterService.
@@ -42,7 +49,17 @@ public class ClusterServiceTest extends BaseServiceTest {
@Override
public List<ServiceTestInvocation> getTestInvocations() throws Exception {
List<ServiceTestInvocation> listInvocations = new ArrayList<ServiceTestInvocation>();
- Clusters clusters = new TestClusters();
+
+ ClusterDAO clusterDAO = EasyMock.createNiceMock(ClusterDAO.class);
+ HostDAO hostDAO = EasyMock.createNiceMock(HostDAO.class);
+
+ EasyMock.expect(clusterDAO.findAll()).andReturn(new ArrayList<ClusterEntity>()).atLeastOnce();
+ EasyMock.expect(hostDAO.findAll()).andReturn(new ArrayList<HostEntity>()).atLeastOnce();
+
+ EasyMock.replay(clusterDAO, hostDAO);
+
+ Clusters clusters = new TestClusters(clusterDAO, EasyMock.createNiceMock(ClusterFactory.class),
+ hostDAO, EasyMock.createNiceMock(HostFactory.class));
ClusterService clusterService;
Method m;
@@ -161,6 +178,12 @@ public class ClusterServiceTest extends BaseServiceTest {
}
private class TestClusters extends ClustersImpl {
+ public TestClusters(ClusterDAO clusterDAO, ClusterFactory clusterFactory, HostDAO hostDAO,
+ HostFactory hostFactory) {
+
+ super(clusterDAO, clusterFactory, hostDAO, hostFactory);
+ }
+
@Override
public boolean checkPermission(String clusterName, boolean readOnly) {
return true;
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
index 93d261b..50f5abe 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
@@ -18,12 +18,20 @@
package org.apache.ambari.server.configuration;
-import com.google.common.eventbus.EventBus;
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-import org.apache.ambari.server.AmbariException;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DATANODE;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostname1;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HDFS;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.NAMENODE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
import org.apache.ambari.server.agent.HeartbeatTestHelper;
import org.apache.ambari.server.agent.RecoveryConfig;
import org.apache.ambari.server.agent.RecoveryConfigHelper;
@@ -40,19 +48,11 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DATANODE;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostname1;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HDFS;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.NAMENODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import com.google.common.eventbus.EventBus;
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
/**
* Test RecoveryConfigHelper class
@@ -86,7 +86,7 @@ public class RecoveryConfigHelperTest {
}
@After
- public void teardown() throws AmbariException {
+ public void teardown() throws Exception {
injector.getInstance(PersistService.class).stop();
}
@@ -95,7 +95,7 @@ public class RecoveryConfigHelperTest {
*/
@Test
public void testRecoveryConfigDefaultValues()
- throws AmbariException {
+ throws Exception {
RecoveryConfig recoveryConfig = recoveryConfigHelper.getDefaultRecoveryConfig();
assertEquals(recoveryConfig.getMaxLifetimeCount(), RecoveryConfigHelper.RECOVERY_LIFETIME_MAX_COUNT_DEFAULT);
assertEquals(recoveryConfig.getMaxCount(), RecoveryConfigHelper.RECOVERY_MAX_COUNT_DEFAULT);
@@ -107,11 +107,12 @@ public class RecoveryConfigHelperTest {
/**
* Test cluster-env properties from a dummy cluster
- * @throws AmbariException
+ *
+ * @throws Exception
*/
@Test
public void testRecoveryConfigValues()
- throws AmbariException {
+ throws Exception {
String hostname = "hostname1";
Cluster cluster = getDummyCluster(hostname);
RecoveryConfig recoveryConfig = recoveryConfigHelper.getRecoveryConfig(cluster.getClusterName(), hostname);
@@ -124,13 +125,14 @@ public class RecoveryConfigHelperTest {
}
/**
- * Install a component with auto start enabled. Verify that the old config was invalidated.
+ * Install a component with auto start enabled. Verify that the old config was
+ * invalidated.
*
- * @throws AmbariException
+ * @throws Exception
*/
@Test
public void testServiceComponentInstalled()
- throws AmbariException {
+ throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -161,12 +163,12 @@ public class RecoveryConfigHelperTest {
/**
* Uninstall a component and verify that the config is stale.
- *
- * @throws AmbariException
+ *
+ * @throws Exception
*/
@Test
public void testServiceComponentUninstalled()
- throws AmbariException {
+ throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -200,11 +202,11 @@ public class RecoveryConfigHelperTest {
/**
* Disable cluster level auto start and verify that the config is stale.
*
- * @throws AmbariException
+ * @throws Exception
*/
@Test
public void testClusterEnvConfigChanged()
- throws AmbariException {
+ throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -238,13 +240,14 @@ public class RecoveryConfigHelperTest {
}
/**
- * Change the maintenance mode of a service component host and verify that config is stale.
+ * Change the maintenance mode of a service component host and verify that
+ * config is stale.
*
- * @throws AmbariException
+ * @throws Exception
*/
@Test
public void testMaintenanceModeChanged()
- throws AmbariException {
+ throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -277,11 +280,11 @@ public class RecoveryConfigHelperTest {
/**
* Disable recovery on a component and verify that the config is stale.
*
- * @throws AmbariException
+ * @throws Exception
*/
@Test
public void testServiceComponentRecoveryChanged()
- throws AmbariException {
+ throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -309,14 +312,14 @@ public class RecoveryConfigHelperTest {
}
/**
- * Test a cluster with two hosts. The first host gets the configuration during registration.
- * The second host gets it during it's first heartbeat.
+ * Test a cluster with two hosts. The first host gets the configuration during
+ * registration. The second host gets it during its first heartbeat.
*
- * @throws AmbariException
+ * @throws Exception
*/
@Test
public void testMultiNodeCluster()
- throws AmbariException {
+ throws Exception {
Set<String> hostNames = new HashSet<String>() {{
add("Host1");
add("Host2");
@@ -351,7 +354,7 @@ public class RecoveryConfigHelperTest {
}
private Cluster getDummyCluster(Set<String> hostNames)
- throws AmbariException {
+ throws Exception {
Map<String, String> configProperties = new HashMap<String, String>() {{
put(RecoveryConfigHelper.RECOVERY_ENABLED_KEY, "true");
put(RecoveryConfigHelper.RECOVERY_TYPE_KEY, "AUTO_START");
@@ -365,7 +368,7 @@ public class RecoveryConfigHelperTest {
}
private Cluster getDummyCluster(final String hostname)
- throws AmbariException {
+ throws Exception {
Set<String> hostNames = new HashSet<String>(){{
add(hostname);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
index 7e6a056..6ac607d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
@@ -18,10 +18,39 @@
package org.apache.ambari.server.controller;
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import junit.framework.Assert;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.getCurrentArguments;
+import static org.easymock.EasyMock.isNull;
+import static org.easymock.EasyMock.newCapture;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.net.InetAddress;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import javax.persistence.EntityManager;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.Role;
import org.apache.ambari.server.actionmanager.ActionManager;
@@ -73,7 +102,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.State;
import org.apache.ambari.server.state.cluster.ClusterFactory;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
import org.apache.ambari.server.state.host.HostFactory;
import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
import org.apache.ambari.server.state.kerberos.KerberosConfigurationDescriptor;
@@ -96,37 +124,11 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
-import javax.persistence.EntityManager;
-import java.net.InetAddress;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
-import static org.easymock.EasyMock.anyLong;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.getCurrentArguments;
-import static org.easymock.EasyMock.isNull;
-import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import junit.framework.Assert;
@SuppressWarnings("unchecked")
public class KerberosHelperTest extends EasyMockSupport {
@@ -203,7 +205,7 @@ public class KerberosHelperTest extends EasyMockSupport {
bind(RequestFactory.class).toInstance(createNiceMock(RequestFactory.class));
bind(StageFactory.class).toInstance(createNiceMock(StageFactory.class));
bind(RoleGraphFactory.class).to(RoleGraphFactoryImpl.class);
- bind(Clusters.class).toInstance(createNiceMock(ClustersImpl.class));
+ bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
bind(ConfigHelper.class).toInstance(createNiceMock(ConfigHelper.class));
bind(KerberosOperationHandlerFactory.class).toInstance(kerberosOperationHandlerFactory);
bind(ClusterController.class).toInstance(clusterController);
@@ -225,7 +227,7 @@ public class KerberosHelperTest extends EasyMockSupport {
StageUtils.setTopologyManager(topologyManager);
expect(topologyManager.getPendingHostComponents()).andReturn(
Collections.<String, Collection<String>>emptyMap()).anyTimes();
-
+
StageUtils.setConfiguration(configuration);
expect(configuration.getApiSSLAuthentication()).andReturn(false).anyTimes();
expect(configuration.getClientApiPort()).andReturn(8080).anyTimes();
@@ -2538,7 +2540,7 @@ public class KerberosHelperTest extends EasyMockSupport {
expect(createKeytabFilesServerAction.createKeytab(capture(capturePrincipalForKeytab), eq("password"), eq(1), anyObject(KerberosOperationHandler.class), eq(true), eq(true), isNull(ActionLog.class)))
.andReturn(new Keytab())
.times(3);
-
+
replayAll();
AmbariMetaInfo ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
index c7261ea..70f0332 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
@@ -255,7 +255,10 @@ public class ClusterResourceProviderTest {
@Test
public void testCreateResourcesWithRetry() throws Exception {
- RetryHelper.init(3);
+ Clusters clusters = createMock(Clusters.class);
+ EasyMock.replay(clusters);
+
+ RetryHelper.init(clusters, 3);
Resource.Type type = Resource.Type.Cluster;
AmbariManagementController managementController = createMock(AmbariManagementController.class);
@@ -309,7 +312,7 @@ public class ClusterResourceProviderTest {
// verify
verify(managementController, response);
- RetryHelper.init(0);
+ RetryHelper.init(clusters, 0);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
index de2d292..f605276 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
@@ -18,12 +18,11 @@
package org.apache.ambari.server.orm;
-import javax.persistence.EntityManager;
-import junit.framework.Assert;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
+import java.lang.reflect.Method;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
@@ -35,6 +34,8 @@ import java.util.Map;
import java.util.Set;
import java.util.UUID;
+import javax.persistence.EntityManager;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.Role;
import org.apache.ambari.server.RoleCommand;
@@ -88,6 +89,7 @@ import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.State;
import org.apache.ambari.server.state.alert.Scope;
import org.apache.ambari.server.state.alert.SourceType;
+import org.apache.ambari.server.state.cluster.ClustersImpl;
import org.springframework.security.crypto.password.PasswordEncoder;
import com.google.inject.Inject;
@@ -96,6 +98,8 @@ import com.google.inject.Provider;
import com.google.inject.Singleton;
import com.google.inject.persist.Transactional;
+import junit.framework.Assert;
+
@Singleton
public class OrmTestHelper {
@@ -308,7 +312,7 @@ public class OrmTestHelper {
* @return the cluster ID.
*/
@Transactional
- public Long createCluster() {
+ public Long createCluster() throws Exception {
return createCluster(CLUSTER_NAME);
}
@@ -318,7 +322,7 @@ public class OrmTestHelper {
* @return the cluster ID.
*/
@Transactional
- public Long createCluster(String clusterName) {
+ public Long createCluster(String clusterName) throws Exception {
// required to populate the database with stacks
injector.getInstance(AmbariMetaInfo.class);
@@ -354,6 +358,15 @@ public class OrmTestHelper {
clusterEntity = clusterDAO.findByName(clusterEntity.getClusterName());
assertNotNull(clusterEntity);
assertTrue(clusterEntity.getClusterId() > 0);
+
+ // because this test method goes around the Clusters business object, we
+ // will forcefully refresh the internal state so that any tests which
+ // incorrectly use Clusters after calling this won't be affected
+ Clusters clusters = injector.getInstance(Clusters.class);
+ Method method = ClustersImpl.class.getDeclaredMethod("loadClustersAndHosts");
+ method.setAccessible(true);
+ method.invoke(clusters);
+
return clusterEntity.getClusterId();
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
index 4edfdcb..2e0c232 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
@@ -73,7 +73,7 @@ public class ClusterVersionDAOTest {
* Helper function to transition the cluster through several cluster versions.
* @param currStep Step to go to is a value from 1 - 7, inclusive.
*/
- private void createRecordsUntilStep(int currStep) {
+ private void createRecordsUntilStep(int currStep) throws Exception {
// Fresh install on A
if (currStep >= 1 && lastStep <= 0) {
clusterId = helper.createCluster();
@@ -147,7 +147,7 @@ public class ClusterVersionDAOTest {
}
@Test
- public void testFindByStackAndVersion() {
+ public void testFindByStackAndVersion() throws Exception {
createRecordsUntilStep(1);
Assert.assertEquals(
0,
@@ -161,14 +161,14 @@ public class ClusterVersionDAOTest {
}
@Test
- public void testFindByCluster() {
+ public void testFindByCluster() throws Exception {
createRecordsUntilStep(1);
Assert.assertEquals(0, clusterVersionDAO.findByCluster("non existing").size());
Assert.assertEquals(1, clusterVersionDAO.findByCluster(cluster.getClusterName()).size());
}
@Test
- public void testFindByClusterAndStackAndVersion() {
+ public void testFindByClusterAndStackAndVersion() throws Exception {
createRecordsUntilStep(1);
Assert.assertNull(clusterVersionDAO.findByClusterAndStackAndVersion(
cluster.getClusterName(), BAD_STACK, "non existing"));
@@ -181,7 +181,7 @@ public class ClusterVersionDAOTest {
* At all times the cluster should have a cluster version whose state is {@link org.apache.ambari.server.state.RepositoryVersionState#CURRENT}
*/
@Test
- public void testFindByClusterAndStateCurrent() {
+ public void testFindByClusterAndStateCurrent() throws Exception {
createRecordsUntilStep(1);
Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
@@ -208,7 +208,7 @@ public class ClusterVersionDAOTest {
* Test the state of certain cluster versions.
*/
@Test
- public void testFindByClusterAndState() {
+ public void testFindByClusterAndState() throws Exception {
createRecordsUntilStep(1);
Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ConfigGroupDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ConfigGroupDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ConfigGroupDAOTest.java
index 33e2636..c73843f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ConfigGroupDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ConfigGroupDAOTest.java
@@ -22,9 +22,6 @@ import java.util.Arrays;
import java.util.List;
import java.util.Set;
-import com.google.inject.assistedinject.AssistedInject;
-import junit.framework.Assert;
-
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
@@ -36,10 +33,9 @@ import org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity;
import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
import org.apache.ambari.server.orm.entities.ConfigGroupHostMappingEntity;
import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.orm.entities.ResourceEntity;
-import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.security.authorization.ResourceType;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.host.HostFactory;
import org.junit.After;
import org.junit.Before;
@@ -49,6 +45,8 @@ import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.persist.PersistService;
+import junit.framework.Assert;
+
public class ConfigGroupDAOTest {
private Injector injector;
private ConfigGroupDAO configGroupDAO;
@@ -88,28 +86,13 @@ public class ConfigGroupDAOTest {
private ConfigGroupEntity createConfigGroup(String clusterName,
String groupName, String tag, String desc, List<HostEntity> hosts,
List<ClusterConfigEntity> configs) throws Exception {
- ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
-
- // create an admin resource to represent this cluster
- ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
- if (resourceTypeEntity == null) {
- resourceTypeEntity = new ResourceTypeEntity();
- resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
- resourceTypeEntity.setName(ResourceType.CLUSTER.name());
- resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
- }
-
+ Clusters clusters = injector.getInstance(Clusters.class);
StackEntity stackEntity = stackDAO.find("HDP", "0.1");
- ResourceEntity resourceEntity = new ResourceEntity();
- resourceEntity.setResourceType(resourceTypeEntity);
+ clusters.addCluster(clusterName, new StackId(stackEntity));
+ ClusterEntity clusterEntity = clusterDAO.findByName(clusterName);
- ClusterEntity clusterEntity = new ClusterEntity();
- clusterEntity.setClusterName(clusterName);
- clusterEntity.setResource(resourceEntity);
- clusterEntity.setDesiredStack(stackEntity);
-
- clusterDAO.create(clusterEntity);
+ ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
configGroupEntity.setClusterEntity(clusterEntity);
configGroupEntity.setClusterId(clusterEntity.getClusterId());
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAOTest.java
index 9d390a9..1267f96 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAOTest.java
@@ -18,6 +18,8 @@
package org.apache.ambari.server.orm.dao;
+import java.util.UUID;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
@@ -38,8 +40,6 @@ import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.persist.PersistService;
-import java.util.UUID;
-
/**
* RepositoryVersionDAO unit tests.
*/
@@ -174,7 +174,7 @@ public class RepositoryVersionDAOTest {
}
@Test
- public void testDeleteCascade() {
+ public void testDeleteCascade() throws Exception {
long clusterId = helper.createCluster();
ClusterEntity cluster = clusterDAO.findById(clusterId);
createSingleRecord();
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/SettingDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/SettingDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/SettingDAOTest.java
index 77f87be..4029bae 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/SettingDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/SettingDAOTest.java
@@ -17,8 +17,13 @@
*/
package org.apache.ambari.server.orm.dao;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertNull;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.OrmTestHelper;
@@ -26,19 +31,15 @@ import org.apache.ambari.server.orm.entities.SettingEntity;
import org.junit.Before;
import org.junit.Test;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Objects;
-
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertNull;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
public class SettingDAOTest {
private Injector injector;
private SettingDAO dao;
@Before
- public void setUp() {
+ public void setUp() throws Exception {
injector = Guice.createInjector(new InMemoryDefaultTestModule());
dao = injector.getInstance(SettingDAO.class);
injector.getInstance(GuiceJpaInitializer.class);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetDAOTest.java
index 080558a..8be805a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetDAOTest.java
@@ -18,6 +18,9 @@
package org.apache.ambari.server.orm.dao;
+import java.util.LinkedList;
+import java.util.List;
+
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.OrmTestHelper;
@@ -33,9 +36,6 @@ import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.persist.PersistService;
-import java.util.LinkedList;
-import java.util.List;
-
/**
* WidgetDAO unit tests.
*/
@@ -49,7 +49,7 @@ public class WidgetDAOTest {
@Before
- public void before() {
+ public void before() throws Exception {
injector = Guice.createInjector(new InMemoryDefaultTestModule());
widgetDAO = injector.getInstance(WidgetDAO.class);
widgetLayoutDAO = injector.getInstance(WidgetLayoutDAO.class);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetLayoutDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetLayoutDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetLayoutDAOTest.java
index 5b9ea6a..f50ae44 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetLayoutDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetLayoutDAOTest.java
@@ -18,9 +18,9 @@
package org.apache.ambari.server.orm.dao;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
+import java.util.LinkedList;
+import java.util.List;
+
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.OrmTestHelper;
@@ -32,8 +32,9 @@ import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
-import java.util.LinkedList;
-import java.util.List;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
/**
* WidgetLayoutDAO unit tests.
@@ -48,7 +49,7 @@ public class WidgetLayoutDAOTest {
@Before
- public void before() {
+ public void before() throws Exception {
injector = Guice.createInjector(new InMemoryDefaultTestModule());
widgetLayoutDAO = injector.getInstance(WidgetLayoutDAO.class);
widgetDAO = injector.getInstance(WidgetDAO.class);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
index 02ce9fb..d8b6a83 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
@@ -21,16 +21,11 @@ import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
-import com.google.inject.persist.jpa.AmbariJpaPersistModule;
-import junit.framework.Assert;
-
import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.dao.ConfigGroupDAO;
import org.apache.ambari.server.orm.dao.ConfigGroupHostMappingDAO;
-import org.apache.ambari.server.orm.dao.HostDAO;
import org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity;
import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
import org.apache.ambari.server.orm.entities.ConfigGroupHostMappingEntity;
@@ -45,6 +40,8 @@ import com.google.inject.Injector;
import com.google.inject.persist.PersistService;
import com.google.inject.persist.Transactional;
+import junit.framework.Assert;
+
public class ConfigGroupTest {
private Clusters clusters;
@@ -109,6 +106,7 @@ public class ConfigGroupTest {
"HDFS", "New HDFS configs for h1", configs, hosts);
configGroup.persist();
+ cluster.addConfigGroup(configGroup);
return configGroup;
}