You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by jo...@apache.org on 2016/10/19 16:08:14 UTC
[01/32] ambari git commit: AMBARI-18475 - Remove Global Cluster Lock
Shared Between Business Objects (jonathanhurley)
Repository: ambari
Updated Branches:
refs/heads/trunk dedcdf9af -> efe38bece
AMBARI-18475 - Remove Global Cluster Lock Shared Between Business Objects (jonathanhurley)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/561c6f2f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/561c6f2f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/561c6f2f
Branch: refs/heads/trunk
Commit: 561c6f2f38f9b262dda4acd7ff0526b7caf55bce
Parents: 8192601
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Sep 27 11:44:12 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Sep 27 15:34:40 2016 -0400
----------------------------------------------------------------------
.../ambari/annotations/ExperimentalFeature.java | 8 +-
.../AmbariManagementControllerImpl.java | 30 +-
.../alerts/AlertServiceStateListener.java | 122 ++++---
.../org/apache/ambari/server/state/Cluster.java | 7 -
.../apache/ambari/server/state/ConfigImpl.java | 98 +++--
.../org/apache/ambari/server/state/Service.java | 7 -
.../ambari/server/state/ServiceComponent.java | 7 -
.../server/state/ServiceComponentImpl.java | 364 +++++++------------
.../apache/ambari/server/state/ServiceImpl.java | 306 ++++++----------
.../server/state/cluster/ClusterImpl.java | 6 -
.../state/configgroup/ConfigGroupImpl.java | 92 ++---
.../svccomphost/ServiceComponentHostImpl.java | 227 +++++-------
.../server/update/HostUpdateHelperTest.java | 40 +-
13 files changed, 522 insertions(+), 792 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java b/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
index 1d5ba0e..7798f26 100644
--- a/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
+++ b/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
@@ -18,6 +18,7 @@
package org.apache.ambari.annotations;
import java.util.concurrent.Executor;
+import java.util.concurrent.locks.Lock;
/**
* The {@link ExperimentalFeature} enumeration is meant to be used with the
@@ -40,5 +41,10 @@ public enum ExperimentalFeature {
/**
* Used for code that is targeted for patch upgrades
*/
- PATCH_UPGRADES
+ PATCH_UPGRADES,
+
+ /**
+ * The removal of the cluster global {@link Lock}
+ */
+ CLUSTER_GLOBAL_LOCK_REMOVAL
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 1fc9dbf..ac680a5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -63,7 +63,6 @@ import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.Lock;
import javax.persistence.RollbackException;
@@ -202,6 +201,7 @@ import org.slf4j.LoggerFactory;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ListMultimap;
import com.google.common.collect.Multimap;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
@@ -209,7 +209,6 @@ import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.Singleton;
import com.google.inject.persist.Transactional;
-import com.google.common.collect.ListMultimap;
@Singleton
public class AmbariManagementControllerImpl implements AmbariManagementController {
@@ -3111,13 +3110,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
changedHosts, requestParameters, requestProperties,
runSmokeTest, reconfigureClients);
- Lock clusterWriteLock = cluster.getClusterGlobalLock().writeLock();
- clusterWriteLock.lock();
- try {
- updateServiceStates(cluster, changedServices, changedComponents, changedHosts, ignoredHosts);
- } finally {
- clusterWriteLock.unlock();
- }
+ updateServiceStates(cluster, changedServices, changedComponents, changedHosts, ignoredHosts);
+
return requestStages;
}
@@ -5166,13 +5160,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
StackInfo stackInfo = ambariMetaInfo.getStack(linkEntity.getStack().getStackName(), linkEntity.getStack().getStackVersion());
- if (stackInfo == null)
+ if (stackInfo == null) {
throw new StackAccessException("stackName=" + linkEntity.getStack().getStackName() + ", stackVersion=" + linkEntity.getStack().getStackVersion());
+ }
ExtensionInfo extensionInfo = ambariMetaInfo.getExtension(linkEntity.getExtension().getExtensionName(), linkEntity.getExtension().getExtensionVersion());
- if (extensionInfo == null)
+ if (extensionInfo == null) {
throw new StackAccessException("extensionName=" + linkEntity.getExtension().getExtensionName() + ", extensionVersion=" + linkEntity.getExtension().getExtensionVersion());
+ }
ExtensionHelper.validateDeleteLink(getClusters(), stackInfo, extensionInfo);
ambariMetaInfo.getStackManager().unlinkStackAndExtension(stackInfo, extensionInfo);
@@ -5202,13 +5198,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
StackInfo stackInfo = ambariMetaInfo.getStack(request.getStackName(), request.getStackVersion());
- if (stackInfo == null)
+ if (stackInfo == null) {
throw new StackAccessException("stackName=" + request.getStackName() + ", stackVersion=" + request.getStackVersion());
+ }
ExtensionInfo extensionInfo = ambariMetaInfo.getExtension(request.getExtensionName(), request.getExtensionVersion());
- if (extensionInfo == null)
+ if (extensionInfo == null) {
throw new StackAccessException("extensionName=" + request.getExtensionName() + ", extensionVersion=" + request.getExtensionVersion());
+ }
ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
ExtensionLinkEntity linkEntity = createExtensionLinkEntity(request);
@@ -5265,13 +5263,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
public void updateExtensionLink(ExtensionLinkEntity linkEntity) throws AmbariException {
StackInfo stackInfo = ambariMetaInfo.getStack(linkEntity.getStack().getStackName(), linkEntity.getStack().getStackVersion());
- if (stackInfo == null)
+ if (stackInfo == null) {
throw new StackAccessException("stackName=" + linkEntity.getStack().getStackName() + ", stackVersion=" + linkEntity.getStack().getStackVersion());
+ }
ExtensionInfo extensionInfo = ambariMetaInfo.getExtension(linkEntity.getExtension().getExtensionName(), linkEntity.getExtension().getExtensionVersion());
- if (extensionInfo == null)
+ if (extensionInfo == null) {
throw new StackAccessException("extensionName=" + linkEntity.getExtension().getExtensionName() + ", extensionVersion=" + linkEntity.getExtension().getExtensionVersion());
+ }
ambariMetaInfo.getStackManager().linkStackToExtension(stackInfo, extensionInfo);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertServiceStateListener.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertServiceStateListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertServiceStateListener.java
index da4cbf5..6f6cea8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertServiceStateListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertServiceStateListener.java
@@ -20,6 +20,7 @@ package org.apache.ambari.server.events.listeners.alerts;
import java.text.MessageFormat;
import java.util.List;
import java.util.Set;
+import java.util.concurrent.locks.Lock;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.EagerSingleton;
@@ -34,7 +35,6 @@ import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
import org.apache.ambari.server.orm.dao.AlertDispatchDAO;
import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
import org.apache.ambari.server.orm.entities.AlertGroupEntity;
-import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.alert.AlertDefinition;
import org.apache.ambari.server.state.alert.AlertDefinitionFactory;
@@ -43,6 +43,7 @@ import org.slf4j.LoggerFactory;
import com.google.common.eventbus.AllowConcurrentEvents;
import com.google.common.eventbus.Subscribe;
+import com.google.common.util.concurrent.Striped;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
@@ -95,7 +96,13 @@ public class AlertServiceStateListener {
* Used to retrieve a cluster using clusterId from event.
*/
@Inject
- private Provider<Clusters> clusters;
+ private Provider<Clusters> m_clusters;
+
+ /**
+ * Used for ensuring that the concurrent nature of the event handler methods
+ * don't collide when attempting to perform operations on the same service.
+ */
+ private Striped<Lock> m_locksByService = Striped.lazyWeakLock(20);
/**
* Constructor.
@@ -125,38 +132,46 @@ public class AlertServiceStateListener {
String stackVersion = event.getStackVersion();
String serviceName = event.getServiceName();
- // create the default alert group for the new service if absent; this MUST
- // be done before adding definitions so that they are properly added to the
- // default group
- if (null == m_alertDispatchDao.findDefaultServiceGroup(clusterId, serviceName)) {
- try {
- m_alertDispatchDao.createDefaultGroup(clusterId, serviceName);
- } catch (AmbariException ambariException) {
- LOG.error("Unable to create a default alert group for {}",
- event.getServiceName(), ambariException);
- }
- }
+ Lock lock = m_locksByService.get(serviceName);
+ lock.lock();
- // populate alert definitions for the new service from the database, but
- // don't worry about sending down commands to the agents; the host
- // components are not yet bound to the hosts so we'd have no way of knowing
- // which hosts are invalidated; do that in another impl
try {
- Set<AlertDefinition> alertDefinitions = m_metaInfoProvider.get().getAlertDefinitions(
- stackName, stackVersion, serviceName);
+ // create the default alert group for the new service if absent; this MUST
+ // be done before adding definitions so that they are properly added to the
+ // default group
+ if (null == m_alertDispatchDao.findDefaultServiceGroup(clusterId, serviceName)) {
+ try {
+ m_alertDispatchDao.createDefaultGroup(clusterId, serviceName);
+ } catch (AmbariException ambariException) {
+ LOG.error("Unable to create a default alert group for {}",
+ event.getServiceName(), ambariException);
+ }
+ }
- for (AlertDefinition definition : alertDefinitions) {
- AlertDefinitionEntity entity = m_alertDefinitionFactory.coerce(
- clusterId,
- definition);
+ // populate alert definitions for the new service from the database, but
+ // don't worry about sending down commands to the agents; the host
+ // components are not yet bound to the hosts so we'd have no way of knowing
+ // which hosts are invalidated; do that in another impl
+ try {
+ Set<AlertDefinition> alertDefinitions = m_metaInfoProvider.get().getAlertDefinitions(
+ stackName, stackVersion, serviceName);
- m_definitionDao.create(entity);
+ for (AlertDefinition definition : alertDefinitions) {
+ AlertDefinitionEntity entity = m_alertDefinitionFactory.coerce(
+ clusterId,
+ definition);
+
+ m_definitionDao.create(entity);
+ }
+ } catch (AmbariException ae) {
+ String message = MessageFormat.format(
+ "Unable to populate alert definitions from the database during installation of {0}",
+ serviceName);
+ LOG.error(message, ae);
}
- } catch (AmbariException ae) {
- String message = MessageFormat.format(
- "Unable to populate alert definitions from the database during installation of {0}",
- serviceName);
- LOG.error(message, ae);
+ }
+ finally {
+ lock.unlock();
}
}
@@ -170,43 +185,44 @@ public class AlertServiceStateListener {
@AllowConcurrentEvents
public void onAmbariEvent(ServiceRemovedEvent event) {
LOG.debug("Received event {}", event);
- Cluster cluster = null;
try {
- cluster = clusters.get().getClusterById(event.getClusterId());
+ m_clusters.get().getClusterById(event.getClusterId());
} catch (AmbariException e) {
- LOG.warn("Unable to retrieve cluster info for id: " + event.getClusterId());
+ LOG.warn("Unable to retrieve cluster with id {}", event.getClusterId());
+ return;
}
- if (cluster != null) {
- // TODO: Explicit locking used to prevent deadlock situation caused during cluster delete
- cluster.getClusterGlobalLock().writeLock().lock();
- try {
- List<AlertDefinitionEntity> definitions = m_definitionDao.findByService(event.getClusterId(),
+ String serviceName = event.getServiceName();
+ Lock lock = m_locksByService.get(serviceName);
+ lock.lock();
+
+ try {
+ List<AlertDefinitionEntity> definitions = m_definitionDao.findByService(event.getClusterId(),
event.getServiceName());
- for (AlertDefinitionEntity definition : definitions) {
- try {
- m_definitionDao.remove(definition);
- } catch (Exception exception) {
- LOG.error("Unable to remove alert definition {}", definition.getDefinitionName(), exception);
- }
+ for (AlertDefinitionEntity definition : definitions) {
+ try {
+ m_definitionDao.remove(definition);
+ } catch (Exception exception) {
+ LOG.error("Unable to remove alert definition {}", definition.getDefinitionName(),
+ exception);
}
+ }
- // remove the default group for the service
- AlertGroupEntity group = m_alertDispatchDao.findGroupByName(event.getClusterId(),
+ // remove the default group for the service
+ AlertGroupEntity group = m_alertDispatchDao.findGroupByName(event.getClusterId(),
event.getServiceName());
- if (null != group && group.isDefault()) {
- try {
- m_alertDispatchDao.remove(group);
- } catch (Exception exception) {
- LOG.error("Unable to remove default alert group {}", group.getGroupName(), exception);
- }
+ if (null != group && group.isDefault()) {
+ try {
+ m_alertDispatchDao.remove(group);
+ } catch (Exception exception) {
+ LOG.error("Unable to remove default alert group {}", group.getGroupName(), exception);
}
- } finally {
- cluster.getClusterGlobalLock().writeLock().unlock();
}
+ } finally {
+ lock.unlock();
}
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index 2452df6..d141df8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -22,7 +22,6 @@ import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.concurrent.locks.ReadWriteLock;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.controller.ClusterResponse;
@@ -522,12 +521,6 @@ public interface Cluster {
Service addService(String serviceName) throws AmbariException;
/**
- * Get lock to control access to cluster structure
- * @return cluster-global lock
- */
- ReadWriteLock getClusterGlobalLock();
-
- /**
* Fetch desired configs for list of hosts in cluster
* @param hostIds
* @return
http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
index 7b7a60b..28bcd5f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
@@ -27,9 +27,6 @@ import java.util.Set;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
-import org.apache.ambari.annotations.TransactionalLock;
-import org.apache.ambari.annotations.TransactionalLock.LockArea;
-import org.apache.ambari.annotations.TransactionalLock.LockType;
import org.apache.ambari.server.events.ClusterConfigChangedEvent;
import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
import org.apache.ambari.server.orm.dao.ClusterDAO;
@@ -365,69 +362,64 @@ public class ConfigImpl implements Config {
@Override
@Transactional
public void persist(boolean newConfig) {
- cluster.getClusterGlobalLock().writeLock().lock(); //null cluster is not expected, NPE anyway later in code
+ readWriteLock.writeLock().lock();
try {
- readWriteLock.writeLock().lock();
- try {
- ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
-
- if (newConfig) {
- ClusterConfigEntity entity = new ClusterConfigEntity();
- entity.setClusterEntity(clusterEntity);
- entity.setClusterId(cluster.getClusterId());
- entity.setType(getType());
- entity.setVersion(getVersion());
- entity.setTag(getTag());
- entity.setTimestamp(new Date().getTime());
- entity.setStack(clusterEntity.getDesiredStack());
- entity.setData(gson.toJson(getProperties()));
+ ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
+
+ if (newConfig) {
+ ClusterConfigEntity entity = new ClusterConfigEntity();
+ entity.setClusterEntity(clusterEntity);
+ entity.setClusterId(cluster.getClusterId());
+ entity.setType(getType());
+ entity.setVersion(getVersion());
+ entity.setTag(getTag());
+ entity.setTimestamp(new Date().getTime());
+ entity.setStack(clusterEntity.getDesiredStack());
+ entity.setData(gson.toJson(getProperties()));
+
+ if (null != getPropertiesAttributes()) {
+ entity.setAttributes(gson.toJson(getPropertiesAttributes()));
+ }
- if (null != getPropertiesAttributes()) {
- entity.setAttributes(gson.toJson(getPropertiesAttributes()));
+ clusterDAO.createConfig(entity);
+ clusterEntity.getClusterConfigEntities().add(entity);
+
+ // save the entity, forcing a flush to ensure the refresh picks up the
+ // newest data
+ clusterDAO.merge(clusterEntity, true);
+ } else {
+ // only supporting changes to the properties
+ ClusterConfigEntity entity = null;
+
+ // find the existing configuration to update
+ for (ClusterConfigEntity cfe : clusterEntity.getClusterConfigEntities()) {
+ if (getTag().equals(cfe.getTag()) && getType().equals(cfe.getType())
+ && getVersion().equals(cfe.getVersion())) {
+ entity = cfe;
+ break;
}
+ }
+
+ // if the configuration was found, then update it
+ if (null != entity) {
+ LOG.debug(
+ "Updating {} version {} with new configurations; a new version will not be created",
+ getType(), getVersion());
- clusterDAO.createConfig(entity);
- clusterEntity.getClusterConfigEntities().add(entity);
+ entity.setData(gson.toJson(getProperties()));
// save the entity, forcing a flush to ensure the refresh picks up the
// newest data
clusterDAO.merge(clusterEntity, true);
- cluster.refresh();
- } else {
- // only supporting changes to the properties
- ClusterConfigEntity entity = null;
-
- // find the existing configuration to update
- for (ClusterConfigEntity cfe : clusterEntity.getClusterConfigEntities()) {
- if (getTag().equals(cfe.getTag()) &&
- getType().equals(cfe.getType()) &&
- getVersion().equals(cfe.getVersion())) {
- entity = cfe;
- break;
- }
- }
-
- // if the configuration was found, then update it
- if (null != entity) {
- LOG.debug(
- "Updating {} version {} with new configurations; a new version will not be created",
- getType(), getVersion());
-
- entity.setData(gson.toJson(getProperties()));
-
- // save the entity, forcing a flush to ensure the refresh picks up the
- // newest data
- clusterDAO.merge(clusterEntity, true);
- cluster.refresh();
- }
}
- } finally {
- readWriteLock.writeLock().unlock();
}
} finally {
- cluster.getClusterGlobalLock().writeLock().unlock();
+ readWriteLock.writeLock().unlock();
}
+ // re-load the entity associations for the cluster
+ cluster.refresh();
+
// broadcast the change event for the configuration
ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
getType(), getTag(), getVersion());
http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
index 7000574..48ab252 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
@@ -19,7 +19,6 @@
package org.apache.ambari.server.state;
import java.util.Map;
-import java.util.concurrent.locks.ReadWriteLock;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.controller.ServiceResponse;
@@ -99,12 +98,6 @@ public interface Service {
void delete() throws AmbariException;
/**
- * Get lock to control access to cluster structure
- * @return cluster-global lock
- */
- ReadWriteLock getClusterGlobalLock();
-
- /**
* Sets the maintenance state for the service
* @param state the state
*/
http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
index 983cbdf..8387ab8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
@@ -19,7 +19,6 @@
package org.apache.ambari.server.state;
import java.util.Map;
-import java.util.concurrent.locks.ReadWriteLock;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.controller.ServiceComponentResponse;
@@ -98,10 +97,4 @@ public interface ServiceComponent {
String hostName) throws AmbariException;
void delete() throws AmbariException;
-
- /**
- * Get lock to control access to cluster structure
- * @return cluster-global lock
- */
- ReadWriteLock getClusterGlobalLock();
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
index 3e805a0..282396d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
@@ -18,12 +18,16 @@
package org.apache.ambari.server.state;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.ProvisionException;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-import com.google.inject.persist.Transactional;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.ObjectNotFoundException;
import org.apache.ambari.server.ServiceComponentHostNotFoundException;
@@ -46,18 +50,18 @@ import org.apache.ambari.server.state.cluster.ClusterImpl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.ProvisionException;
+import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
+import com.google.inject.persist.Transactional;
public class ServiceComponentImpl implements ServiceComponent {
private final static Logger LOG =
LoggerFactory.getLogger(ServiceComponentImpl.class);
private final Service service;
- private final ReadWriteLock clusterGlobalLock;
private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
private final String componentName;
private final String displayName;
@@ -79,7 +83,7 @@ public class ServiceComponentImpl implements ServiceComponent {
private AmbariEventPublisher eventPublisher;
ServiceComponentDesiredStateEntity desiredStateEntity;
- private Map<String, ServiceComponentHost> hostComponents;
+ private ConcurrentMap<String, ServiceComponentHost> hostComponents;
/**
* Data access object used for lookup up stacks.
@@ -91,7 +95,6 @@ public class ServiceComponentImpl implements ServiceComponent {
public ServiceComponentImpl(@Assisted Service service,
@Assisted String componentName, Injector injector) throws AmbariException {
injector.injectMembers(this);
- clusterGlobalLock = service.getClusterGlobalLock();
this.service = service;
desiredStateEntity = new ServiceComponentDesiredStateEntity();
@@ -103,7 +106,7 @@ public class ServiceComponentImpl implements ServiceComponent {
desiredStateEntity.setRecoveryEnabled(false);
setDesiredStackVersion(service.getDesiredStackVersion());
- hostComponents = new HashMap<String, ServiceComponentHost>();
+ hostComponents = new ConcurrentHashMap<String, ServiceComponentHost>();
StackId stackId = service.getDesiredStackVersion();
try {
@@ -129,7 +132,6 @@ public class ServiceComponentImpl implements ServiceComponent {
@Assisted ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity,
Injector injector) throws AmbariException {
injector.injectMembers(this);
- clusterGlobalLock = service.getClusterGlobalLock();
this.service = service;
desiredStateEntity = serviceComponentDesiredStateEntity;
@@ -153,7 +155,7 @@ public class ServiceComponentImpl implements ServiceComponent {
+ ", stackInfo=" + stackId.getStackId());
}
- hostComponents = new HashMap<String, ServiceComponentHost>();
+ hostComponents = new ConcurrentHashMap<String, ServiceComponentHost>();
for (HostComponentStateEntity hostComponentStateEntity : desiredStateEntity.getHostComponentStateEntities()) {
HostComponentDesiredStateEntityPK pk = new HostComponentDesiredStateEntityPK();
pk.setClusterId(hostComponentStateEntity.getClusterId());
@@ -179,11 +181,6 @@ public class ServiceComponentImpl implements ServiceComponent {
}
@Override
- public ReadWriteLock getClusterGlobalLock() {
- return clusterGlobalLock;
- }
-
- @Override
public String getName() {
return componentName;
}
@@ -254,145 +251,84 @@ public class ServiceComponentImpl implements ServiceComponent {
}
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public Map<String, ServiceComponentHost> getServiceComponentHosts() {
- clusterGlobalLock.readLock().lock();
- try {
- readWriteLock.readLock().lock();
- try {
- return new HashMap<String, ServiceComponentHost>(hostComponents);
- } finally {
- readWriteLock.readLock().unlock();
- }
- } finally {
- clusterGlobalLock.readLock().unlock();
- }
+ return new HashMap<String, ServiceComponentHost>(hostComponents);
}
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void addServiceComponentHosts(
Map<String, ServiceComponentHost> hostComponents) throws AmbariException {
- clusterGlobalLock.writeLock().lock();
+ readWriteLock.writeLock().lock();
try {
- readWriteLock.writeLock().lock();
- try {
- // TODO validation
- for (Entry<String, ServiceComponentHost> entry :
- hostComponents.entrySet()) {
- if (!entry.getKey().equals(entry.getValue().getHostName())) {
- throw new AmbariException("Invalid arguments in map"
- + ", hostname does not match the key in map");
- }
+ // TODO validation
+ for (Entry<String, ServiceComponentHost> entry :
+ hostComponents.entrySet()) {
+ if (!entry.getKey().equals(entry.getValue().getHostName())) {
+ throw new AmbariException("Invalid arguments in map"
+ + ", hostname does not match the key in map");
}
- for (ServiceComponentHost sch : hostComponents.values()) {
- addServiceComponentHost(sch);
- }
- } finally {
- readWriteLock.writeLock().unlock();
+ }
+ for (ServiceComponentHost sch : hostComponents.values()) {
+ addServiceComponentHost(sch);
}
} finally {
- clusterGlobalLock.writeLock().unlock();
+ readWriteLock.writeLock().unlock();
}
}
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void addServiceComponentHost(
ServiceComponentHost hostComponent) throws AmbariException {
- clusterGlobalLock.writeLock().lock();
+ readWriteLock.writeLock().lock();
try {
- readWriteLock.writeLock().lock();
- try {
- // TODO validation
- // TODO ensure host belongs to cluster
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding a ServiceComponentHost to ServiceComponent"
- + ", clusterName=" + service.getCluster().getClusterName()
- + ", clusterId=" + service.getCluster().getClusterId()
- + ", serviceName=" + service.getName()
- + ", serviceComponentName=" + getName()
- + ", hostname=" + hostComponent.getHostName()
- + ", recoveryEnabled=" + isRecoveryEnabled());
- }
- if (hostComponents.containsKey(hostComponent.getHostName())) {
- throw new AmbariException("Cannot add duplicate ServiceComponentHost"
- + ", clusterName=" + service.getCluster().getClusterName()
- + ", clusterId=" + service.getCluster().getClusterId()
- + ", serviceName=" + service.getName()
- + ", serviceComponentName=" + getName()
- + ", hostname=" + hostComponent.getHostName()
- + ", recoveryEnabled=" + isRecoveryEnabled());
- }
- // FIXME need a better approach of caching components by host
- ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
- clusterImpl.addServiceComponentHost(hostComponent);
- hostComponents.put(hostComponent.getHostName(), hostComponent);
- } finally {
- readWriteLock.writeLock().unlock();
+ // TODO validation
+ // TODO ensure host belongs to cluster
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Adding a ServiceComponentHost to ServiceComponent" + ", clusterName="
+ + service.getCluster().getClusterName() + ", clusterId="
+ + service.getCluster().getClusterId() + ", serviceName=" + service.getName()
+ + ", serviceComponentName=" + getName() + ", hostname=" + hostComponent.getHostName()
+ + ", recoveryEnabled=" + isRecoveryEnabled());
+ }
+
+ if (hostComponents.containsKey(hostComponent.getHostName())) {
+ throw new AmbariException("Cannot add duplicate ServiceComponentHost" + ", clusterName="
+ + service.getCluster().getClusterName() + ", clusterId="
+ + service.getCluster().getClusterId() + ", serviceName=" + service.getName()
+ + ", serviceComponentName=" + getName() + ", hostname=" + hostComponent.getHostName()
+ + ", recoveryEnabled=" + isRecoveryEnabled());
}
+ // FIXME need a better approach of caching components by host
+ ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
+ clusterImpl.addServiceComponentHost(hostComponent);
+ hostComponents.put(hostComponent.getHostName(), hostComponent);
} finally {
- clusterGlobalLock.writeLock().unlock();
+ readWriteLock.writeLock().unlock();
}
}
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public ServiceComponentHost addServiceComponentHost(String hostName) throws AmbariException {
- clusterGlobalLock.writeLock().lock();
- try {
- readWriteLock.writeLock().lock();
- try {
- // TODO validation
- // TODO ensure host belongs to cluster
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding a ServiceComponentHost to ServiceComponent"
- + ", clusterName=" + service.getCluster().getClusterName()
- + ", clusterId=" + service.getCluster().getClusterId()
- + ", serviceName=" + service.getName()
- + ", serviceComponentName=" + getName()
- + ", recoveryEnabled=" + isRecoveryEnabled()
- + ", hostname=" + hostName);
- }
- if (hostComponents.containsKey(hostName)) {
- throw new AmbariException("Cannot add duplicate ServiceComponentHost"
- + ", clusterName=" + service.getCluster().getClusterName()
- + ", clusterId=" + service.getCluster().getClusterId()
- + ", serviceName=" + service.getName()
- + ", serviceComponentName=" + getName()
- + ", recoveryEnabled=" + isRecoveryEnabled()
- + ", hostname=" + hostName);
- }
- ServiceComponentHost hostComponent = serviceComponentHostFactory.createNew(this, hostName);
- // FIXME need a better approach of caching components by host
- ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
- clusterImpl.addServiceComponentHost(hostComponent);
-
- hostComponents.put(hostComponent.getHostName(), hostComponent);
-
- return hostComponent;
- } finally {
- readWriteLock.writeLock().unlock();
- }
- } finally {
- clusterGlobalLock.writeLock().unlock();
- }
+ ServiceComponentHost hostComponent = serviceComponentHostFactory.createNew(this, hostName);
+ addServiceComponentHost(hostComponent);
+ return hostComponent;
}
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public ServiceComponentHost getServiceComponentHost(String hostname)
throws AmbariException {
- clusterGlobalLock.readLock().lock();
- try {
- readWriteLock.readLock().lock();
- try {
- if (!hostComponents.containsKey(hostname)) {
- throw new ServiceComponentHostNotFoundException(getClusterName(),
- getServiceName(), getName(), hostname);
- }
- return hostComponents.get(hostname);
- } finally {
- readWriteLock.readLock().unlock();
- }
- } finally {
- clusterGlobalLock.readLock().unlock();
+
+ if (!hostComponents.containsKey(hostname)) {
+ throw new ServiceComponentHostNotFoundException(getClusterName(),
+ getServiceName(), getName(), hostname);
}
+
+ return hostComponents.get(hostname);
}
@Override
@@ -580,38 +516,20 @@ public class ServiceComponentImpl implements ServiceComponent {
* transaction is not necessary before this calling this method.
*/
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void persist() {
- boolean clusterWriteLockAcquired = false;
- if (!persisted) {
- clusterGlobalLock.writeLock().lock();
- clusterWriteLockAcquired = true;
- }
-
+ readWriteLock.writeLock().lock();
try {
- readWriteLock.writeLock().lock();
- try {
- if (!persisted) {
- // persist the new cluster topology and then release the cluster lock
- // as it has no more bearing on the rest of this persist() method
- persistEntities();
- clusterGlobalLock.writeLock().unlock();
- clusterWriteLockAcquired = false;
-
- refresh();
- // There refresh calls are no longer needed with cached references
- // not used on getters/setters
- // service.refresh();
- persisted = true;
- } else {
- saveIfPersisted(desiredStateEntity);
- }
- } finally {
- readWriteLock.writeLock().unlock();
+ if (!persisted) {
+ // persist the new cluster topology
+ persistEntities();
+ refresh();
+ persisted = true;
+ } else {
+ saveIfPersisted(desiredStateEntity);
}
} finally {
- if (clusterWriteLockAcquired) {
- clusterGlobalLock.writeLock().unlock();
- }
+ readWriteLock.writeLock().unlock();
}
}
@@ -671,123 +589,95 @@ public class ServiceComponentImpl implements ServiceComponent {
}
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public boolean canBeRemoved() {
- clusterGlobalLock.readLock().lock();
+ readWriteLock.readLock().lock();
try {
- readWriteLock.readLock().lock();
- try {
- // A component can be deleted if all it's host components
- // can be removed, irrespective of the state of
- // the component itself
- for (ServiceComponentHost sch : hostComponents.values()) {
- if (!sch.canBeRemoved()) {
- LOG.warn("Found non removable hostcomponent when trying to"
- + " delete service component"
- + ", clusterName=" + getClusterName()
- + ", serviceName=" + getServiceName()
- + ", componentName=" + getName()
- + ", state=" + sch.getState()
- + ", hostname=" + sch.getHostName());
- return false;
- }
+ // A component can be deleted if all it's host components
+ // can be removed, irrespective of the state of
+ // the component itself
+ for (ServiceComponentHost sch : hostComponents.values()) {
+ if (!sch.canBeRemoved()) {
+ LOG.warn("Found non removable hostcomponent when trying to" + " delete service component"
+ + ", clusterName=" + getClusterName() + ", serviceName=" + getServiceName()
+ + ", componentName=" + getName() + ", state=" + sch.getState() + ", hostname="
+ + sch.getHostName());
+ return false;
}
- return true;
- } finally {
- readWriteLock.readLock().unlock();
}
+ return true;
} finally {
- clusterGlobalLock.readLock().unlock();
+ readWriteLock.readLock().unlock();
}
}
@Override
@Transactional
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void deleteAllServiceComponentHosts() throws AmbariException {
- clusterGlobalLock.writeLock().lock();
+ readWriteLock.writeLock().lock();
try {
- readWriteLock.writeLock().lock();
- try {
- LOG.info("Deleting all servicecomponenthosts for component"
- + ", clusterName=" + getClusterName()
- + ", serviceName=" + getServiceName()
- + ", componentName=" + getName()
- + ", recoveryEnabled=" + isRecoveryEnabled());
- for (ServiceComponentHost sch : hostComponents.values()) {
- if (!sch.canBeRemoved()) {
- throw new AmbariException("Found non removable hostcomponent "
- + " when trying to delete"
- + " all hostcomponents from servicecomponent"
- + ", clusterName=" + getClusterName()
- + ", serviceName=" + getServiceName()
- + ", componentName=" + getName()
- + ", recoveryEnabled=" + isRecoveryEnabled()
- + ", hostname=" + sch.getHostName());
- }
- }
-
- for (ServiceComponentHost serviceComponentHost : hostComponents.values()) {
- serviceComponentHost.delete();
+ LOG.info("Deleting all servicecomponenthosts for component" + ", clusterName="
+ + getClusterName() + ", serviceName=" + getServiceName() + ", componentName=" + getName()
+ + ", recoveryEnabled=" + isRecoveryEnabled());
+ for (ServiceComponentHost sch : hostComponents.values()) {
+ if (!sch.canBeRemoved()) {
+ throw new AmbariException("Found non removable hostcomponent " + " when trying to delete"
+ + " all hostcomponents from servicecomponent" + ", clusterName=" + getClusterName()
+ + ", serviceName=" + getServiceName() + ", componentName=" + getName()
+ + ", recoveryEnabled=" + isRecoveryEnabled() + ", hostname=" + sch.getHostName());
}
+ }
- hostComponents.clear();
- } finally {
- readWriteLock.writeLock().unlock();
+ for (ServiceComponentHost serviceComponentHost : hostComponents.values()) {
+ serviceComponentHost.delete();
}
+
+ hostComponents.clear();
} finally {
- clusterGlobalLock.writeLock().unlock();
+ readWriteLock.writeLock().unlock();
}
}
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void deleteServiceComponentHosts(String hostname) throws AmbariException {
- clusterGlobalLock.writeLock().lock();
+ readWriteLock.writeLock().lock();
try {
- readWriteLock.writeLock().lock();
- try {
- ServiceComponentHost sch = getServiceComponentHost(hostname);
- LOG.info("Deleting servicecomponenthost for cluster"
+ ServiceComponentHost sch = getServiceComponentHost(hostname);
+ LOG.info("Deleting servicecomponenthost for cluster" + ", clusterName=" + getClusterName()
+ + ", serviceName=" + getServiceName() + ", componentName=" + getName()
+ + ", recoveryEnabled=" + isRecoveryEnabled() + ", hostname=" + sch.getHostName());
+ if (!sch.canBeRemoved()) {
+ throw new AmbariException("Could not delete hostcomponent from cluster"
+ ", clusterName=" + getClusterName()
+ ", serviceName=" + getServiceName()
+ ", componentName=" + getName()
+ ", recoveryEnabled=" + isRecoveryEnabled()
+ ", hostname=" + sch.getHostName());
- if (!sch.canBeRemoved()) {
- throw new AmbariException("Could not delete hostcomponent from cluster"
- + ", clusterName=" + getClusterName()
- + ", serviceName=" + getServiceName()
- + ", componentName=" + getName()
- + ", recoveryEnabled=" + isRecoveryEnabled()
- + ", hostname=" + sch.getHostName());
- }
- sch.delete();
- hostComponents.remove(hostname);
-
- } finally {
- readWriteLock.writeLock().unlock();
}
+ sch.delete();
+ hostComponents.remove(hostname);
+
} finally {
- clusterGlobalLock.writeLock().unlock();
+ readWriteLock.writeLock().unlock();
}
}
@Override
@Transactional
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void delete() throws AmbariException {
- clusterGlobalLock.writeLock().lock();
+ readWriteLock.writeLock().lock();
try {
- readWriteLock.writeLock().lock();
- try {
- deleteAllServiceComponentHosts();
+ deleteAllServiceComponentHosts();
- if (persisted) {
- removeEntities();
- persisted = false;
- }
- } finally {
- readWriteLock.writeLock().unlock();
+ if (persisted) {
+ removeEntities();
+ persisted = false;
}
} finally {
- clusterGlobalLock.writeLock().unlock();
+ readWriteLock.writeLock().unlock();
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
index 3120b86..36d4902 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
@@ -18,12 +18,15 @@
package org.apache.ambari.server.state;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.ProvisionException;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-import com.google.inject.persist.Transactional;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.ServiceComponentNotFoundException;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -43,7 +46,6 @@ import org.apache.ambari.server.orm.entities.ClusterConfigMappingEntity;
import org.apache.ambari.server.orm.entities.ClusterEntity;
import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
import org.apache.ambari.server.orm.entities.ClusterServiceEntityPK;
-import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
import org.apache.ambari.server.orm.entities.ServiceConfigEntity;
import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
@@ -51,19 +53,16 @@ import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntityPK;
import org.apache.ambari.server.orm.entities.StackEntity;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.ProvisionException;
+import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
+import com.google.inject.persist.Transactional;
public class ServiceImpl implements Service {
- private final ReadWriteLock clusterGlobalLock;
private ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
// Cached entity has only 1 getter for name
private ClusterServiceEntity serviceEntity;
@@ -113,7 +112,6 @@ public class ServiceImpl implements Service {
public ServiceImpl(@Assisted Cluster cluster, @Assisted String serviceName,
Injector injector) throws AmbariException {
injector.injectMembers(this);
- clusterGlobalLock = cluster.getClusterGlobalLock();
serviceEntity = new ClusterServiceEntity();
serviceEntity.setClusterId(cluster.getClusterId());
serviceEntity.setServiceName(serviceName);
@@ -145,7 +143,6 @@ public class ServiceImpl implements Service {
public ServiceImpl(@Assisted Cluster cluster, @Assisted ClusterServiceEntity
serviceEntity, Injector injector) throws AmbariException {
injector.injectMembers(this);
- clusterGlobalLock = cluster.getClusterGlobalLock();
this.serviceEntity = serviceEntity;
this.cluster = cluster;
@@ -182,11 +179,6 @@ public class ServiceImpl implements Service {
}
@Override
- public ReadWriteLock getClusterGlobalLock() {
- return clusterGlobalLock;
- }
-
- @Override
public String getName() {
return serviceEntity.getServiceName();
}
@@ -207,83 +199,35 @@ public class ServiceImpl implements Service {
}
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void addServiceComponents(
Map<String, ServiceComponent> components) throws AmbariException {
- clusterGlobalLock.writeLock().lock();
- try {
- readWriteLock.writeLock().lock();
- try {
- for (ServiceComponent sc : components.values()) {
- addServiceComponent(sc);
- }
- } finally {
- readWriteLock.writeLock().unlock();
- }
- } finally {
- clusterGlobalLock.writeLock().unlock();
+ for (ServiceComponent sc : components.values()) {
+ addServiceComponent(sc);
}
}
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void addServiceComponent(ServiceComponent component) throws AmbariException {
- clusterGlobalLock.writeLock().lock();
- try {
- readWriteLock.writeLock().lock();
- try {
- // TODO validation
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding a ServiceComponent to Service"
- + ", clusterName=" + cluster.getClusterName()
- + ", clusterId=" + cluster.getClusterId()
- + ", serviceName=" + getName()
- + ", serviceComponentName=" + component.getName());
- }
- if (components.containsKey(component.getName())) {
- throw new AmbariException("Cannot add duplicate ServiceComponent"
- + ", clusterName=" + cluster.getClusterName()
- + ", clusterId=" + cluster.getClusterId()
- + ", serviceName=" + getName()
- + ", serviceComponentName=" + component.getName());
- }
- components.put(component.getName(), component);
- } finally {
- readWriteLock.writeLock().unlock();
- }
- } finally {
- clusterGlobalLock.writeLock().unlock();
+ if (components.containsKey(component.getName())) {
+ throw new AmbariException("Cannot add duplicate ServiceComponent"
+ + ", clusterName=" + cluster.getClusterName()
+ + ", clusterId=" + cluster.getClusterId()
+ + ", serviceName=" + getName()
+ + ", serviceComponentName=" + component.getName());
}
+
+ components.put(component.getName(), component);
}
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public ServiceComponent addServiceComponent(String serviceComponentName)
throws AmbariException {
- clusterGlobalLock.writeLock().lock();
- try {
- readWriteLock.writeLock().lock();
- try {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding a ServiceComponent to Service"
- + ", clusterName=" + cluster.getClusterName()
- + ", clusterId=" + cluster.getClusterId()
- + ", serviceName=" + getName()
- + ", serviceComponentName=" + serviceComponentName);
- }
- if (components.containsKey(serviceComponentName)) {
- throw new AmbariException("Cannot add duplicate ServiceComponent"
- + ", clusterName=" + cluster.getClusterName()
- + ", clusterId=" + cluster.getClusterId()
- + ", serviceName=" + getName()
- + ", serviceComponentName=" + serviceComponentName);
- }
- ServiceComponent component = serviceComponentFactory.createNew(this, serviceComponentName);
- components.put(component.getName(), component);
- return component;
- } finally {
- readWriteLock.writeLock().unlock();
- }
- } finally {
- clusterGlobalLock.writeLock().unlock();
- }
+ ServiceComponent component = serviceComponentFactory.createNew(this, serviceComponentName);
+ addServiceComponent(component);
+ return component;
}
@Override
@@ -460,36 +404,30 @@ public class ServiceImpl implements Service {
* transaction is not necessary before this calling this method.
*/
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void persist() {
- clusterGlobalLock.writeLock().lock();
+ readWriteLock.writeLock().lock();
try {
- readWriteLock.writeLock().lock();
- try {
- if (!persisted) {
- persistEntities();
- refresh();
- // There refresh calls are no longer needed with cached references
- // not used on getters/setters
- // cluster.refresh();
- persisted = true;
-
- // publish the service installed event
- StackId stackId = cluster.getDesiredStackVersion();
- cluster.addService(this);
-
- ServiceInstalledEvent event = new ServiceInstalledEvent(
- getClusterId(), stackId.getStackName(),
- stackId.getStackVersion(), getName());
-
- eventPublisher.publish(event);
- } else {
- saveIfPersisted();
- }
- } finally {
- readWriteLock.writeLock().unlock();
+ if (!persisted) {
+ persistEntities();
+ refresh();
+
+ persisted = true;
+
+ // publish the service installed event
+ StackId stackId = cluster.getDesiredStackVersion();
+ cluster.addService(this);
+
+ ServiceInstalledEvent event = new ServiceInstalledEvent(
+ getClusterId(), stackId.getStackName(),
+ stackId.getStackVersion(), getName());
+
+ eventPublisher.publish(event);
+ } else {
+ saveIfPersisted();
}
} finally {
- clusterGlobalLock.writeLock().unlock();
+ readWriteLock.writeLock().unlock();
}
}
@@ -535,31 +473,26 @@ public class ServiceImpl implements Service {
}
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public boolean canBeRemoved() {
- clusterGlobalLock.readLock().lock();
+ readWriteLock.readLock().lock();
try {
- readWriteLock.readLock().lock();
- try {
- //
- // A service can be deleted if all it's components
- // can be removed, irrespective of the state of
- // the service itself.
- //
- for (ServiceComponent sc : components.values()) {
- if (!sc.canBeRemoved()) {
- LOG.warn("Found non removable component when trying to delete service"
- + ", clusterName=" + cluster.getClusterName()
- + ", serviceName=" + getName()
- + ", componentName=" + sc.getName());
- return false;
- }
+ //
+ // A service can be deleted if all it's components
+ // can be removed, irrespective of the state of
+ // the service itself.
+ //
+ for (ServiceComponent sc : components.values()) {
+ if (!sc.canBeRemoved()) {
+ LOG.warn("Found non removable component when trying to delete service" + ", clusterName="
+ + cluster.getClusterName() + ", serviceName=" + getName() + ", componentName="
+ + sc.getName());
+ return false;
}
- return true;
- } finally {
- readWriteLock.readLock().unlock();
}
+ return true;
} finally {
- clusterGlobalLock.readLock().unlock();
+ readWriteLock.readLock().unlock();
}
}
@@ -599,71 +532,56 @@ public class ServiceImpl implements Service {
serviceConfigDAO.remove(serviceConfigEntity);
}
}
-
+
@Override
@Transactional
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void deleteAllComponents() throws AmbariException {
- clusterGlobalLock.writeLock().lock();
+ readWriteLock.writeLock().lock();
try {
- readWriteLock.writeLock().lock();
- try {
- LOG.info("Deleting all components for service"
- + ", clusterName=" + cluster.getClusterName()
- + ", serviceName=" + getName());
- // FIXME check dependencies from meta layer
- for (ServiceComponent component : components.values()) {
- if (!component.canBeRemoved()) {
- throw new AmbariException("Found non removable component when trying to"
- + " delete all components from service"
- + ", clusterName=" + cluster.getClusterName()
- + ", serviceName=" + getName()
- + ", componentName=" + component.getName());
- }
- }
-
- for (ServiceComponent serviceComponent : components.values()) {
- serviceComponent.delete();
+ LOG.info("Deleting all components for service" + ", clusterName=" + cluster.getClusterName()
+ + ", serviceName=" + getName());
+ // FIXME check dependencies from meta layer
+ for (ServiceComponent component : components.values()) {
+ if (!component.canBeRemoved()) {
+ throw new AmbariException("Found non removable component when trying to"
+ + " delete all components from service" + ", clusterName=" + cluster.getClusterName()
+ + ", serviceName=" + getName() + ", componentName=" + component.getName());
}
+ }
- components.clear();
- } finally {
- readWriteLock.writeLock().unlock();
+ for (ServiceComponent serviceComponent : components.values()) {
+ serviceComponent.delete();
}
+
+ components.clear();
} finally {
- clusterGlobalLock.writeLock().unlock();
+ readWriteLock.writeLock().unlock();
}
}
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void deleteServiceComponent(String componentName)
throws AmbariException {
- clusterGlobalLock.writeLock().lock();
+ readWriteLock.writeLock().lock();
try {
- readWriteLock.writeLock().lock();
- try {
- ServiceComponent component = getServiceComponent(componentName);
- LOG.info("Deleting servicecomponent for cluster"
+ ServiceComponent component = getServiceComponent(componentName);
+ LOG.info("Deleting servicecomponent for cluster" + ", clusterName=" + cluster.getClusterName()
+ + ", serviceName=" + getName() + ", componentName=" + componentName);
+ // FIXME check dependencies from meta layer
+ if (!component.canBeRemoved()) {
+ throw new AmbariException("Could not delete component from cluster"
+ ", clusterName=" + cluster.getClusterName()
+ ", serviceName=" + getName()
+ ", componentName=" + componentName);
- // FIXME check dependencies from meta layer
- if (!component.canBeRemoved()) {
- throw new AmbariException("Could not delete component from cluster"
- + ", clusterName=" + cluster.getClusterName()
- + ", serviceName=" + getName()
- + ", componentName=" + componentName);
- }
-
- component.delete();
- components.remove(componentName);
- } finally {
- readWriteLock.writeLock().unlock();
}
+
+ component.delete();
+ components.remove(componentName);
} finally {
- clusterGlobalLock.writeLock().unlock();
+ readWriteLock.writeLock().unlock();
}
-
-
}
@Override
@@ -673,34 +591,28 @@ public class ServiceImpl implements Service {
@Override
@Transactional
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void delete() throws AmbariException {
- clusterGlobalLock.writeLock().lock();
+ readWriteLock.writeLock().lock();
try {
- readWriteLock.writeLock().lock();
- try {
- deleteAllComponents();
- deleteAllServiceConfigs();
+ deleteAllComponents();
+ deleteAllServiceConfigs();
- if (persisted) {
- removeEntities();
- persisted = false;
+ if (persisted) {
+ removeEntities();
+ persisted = false;
- // publish the service removed event
- StackId stackId = cluster.getDesiredStackVersion();
+ // publish the service removed event
+ StackId stackId = cluster.getDesiredStackVersion();
- ServiceRemovedEvent event = new ServiceRemovedEvent(getClusterId(),
- stackId.getStackName(), stackId.getStackVersion(), getName());
+ ServiceRemovedEvent event = new ServiceRemovedEvent(getClusterId(), stackId.getStackName(),
+ stackId.getStackVersion(), getName());
- eventPublisher.publish(event);
- }
- } finally {
- readWriteLock.writeLock().unlock();
+ eventPublisher.publish(event);
}
} finally {
- clusterGlobalLock.writeLock().unlock();
+ readWriteLock.writeLock().unlock();
}
-
-
}
@Transactional
http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 2f7d6b9..a6f0a3b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -336,12 +336,6 @@ public class ClusterImpl implements Cluster {
this.eventPublisher = eventPublisher;
}
-
- @Override
- public ReadWriteLock getClusterGlobalLock() {
- return clusterGlobalLock;
- }
-
private void loadServiceConfigTypes() throws AmbariException {
try {
serviceConfigTypes = collectServiceConfigTypesMapping();
http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
index 1d6b1e8..9917720 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
@@ -44,7 +44,6 @@ import org.apache.ambari.server.orm.entities.HostEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.Host;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -80,8 +79,6 @@ public class ConfigGroupImpl implements ConfigGroup {
private ClusterDAO clusterDAO;
@Inject
Clusters clusters;
- @Inject
- private ConfigFactory configFactory;
@AssistedInject
public ConfigGroupImpl(@Assisted("cluster") Cluster cluster,
@@ -317,23 +314,18 @@ public class ConfigGroupImpl implements ConfigGroup {
@Override
public void persist() {
- cluster.getClusterGlobalLock().writeLock().lock();
+ readWriteLock.writeLock().lock();
try {
- readWriteLock.writeLock().lock();
- try {
- if (!isPersisted) {
- persistEntities();
- refresh();
- cluster.refresh();
- isPersisted = true;
- } else {
- saveIfPersisted();
- }
- } finally {
- readWriteLock.writeLock().unlock();
+ if (!isPersisted) {
+ persistEntities();
+ refresh();
+ cluster.refresh();
+ isPersisted = true;
+ } else {
+ saveIfPersisted();
}
} finally {
- cluster.getClusterGlobalLock().writeLock().unlock();
+ readWriteLock.writeLock().unlock();
}
}
@@ -465,20 +457,15 @@ public class ConfigGroupImpl implements ConfigGroup {
@Override
public void delete() {
- cluster.getClusterGlobalLock().writeLock().lock();
+ readWriteLock.writeLock().lock();
try {
- readWriteLock.writeLock().lock();
- try {
- configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
- configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
- configGroupDAO.removeByPK(configGroupEntity.getGroupId());
- cluster.refresh();
- isPersisted = false;
- } finally {
- readWriteLock.writeLock().unlock();
- }
+ configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
+ configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
+ configGroupDAO.removeByPK(configGroupEntity.getGroupId());
+ cluster.refresh();
+ isPersisted = false;
} finally {
- cluster.getClusterGlobalLock().writeLock().unlock();
+ readWriteLock.writeLock().unlock();
}
}
@@ -526,40 +513,33 @@ public class ConfigGroupImpl implements ConfigGroup {
@Override
public ConfigGroupResponse convertToResponse() throws AmbariException {
- cluster.getClusterGlobalLock().readLock().lock();
+ readWriteLock.readLock().lock();
try {
- readWriteLock.readLock().lock();
- try {
- Set<Map<String, Object>> hostnames = new HashSet<Map<String, Object>>();
- for (Host host : hosts.values()) {
- Map<String, Object> hostMap = new HashMap<String, Object>();
- hostMap.put("host_name", host.getHostName());
- hostnames.add(hostMap);
- }
+ Set<Map<String, Object>> hostnames = new HashSet<Map<String, Object>>();
+ for (Host host : hosts.values()) {
+ Map<String, Object> hostMap = new HashMap<String, Object>();
+ hostMap.put("host_name", host.getHostName());
+ hostnames.add(hostMap);
+ }
- Set<Map<String, Object>> configObjMap = new HashSet<Map<String,
- Object>>();
+ Set<Map<String, Object>> configObjMap = new HashSet<Map<String, Object>>();
- for (Config config : configurations.values()) {
- Map<String, Object> configMap = new HashMap<String, Object>();
- configMap.put(ConfigurationResourceProvider
- .CONFIGURATION_CONFIG_TYPE_PROPERTY_ID, config.getType());
- configMap.put(ConfigurationResourceProvider
- .CONFIGURATION_CONFIG_TAG_PROPERTY_ID, config.getTag());
- configObjMap.add(configMap);
- }
+ for (Config config : configurations.values()) {
+ Map<String, Object> configMap = new HashMap<String, Object>();
+ configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID,
+ config.getType());
+ configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID,
+ config.getTag());
+ configObjMap.add(configMap);
+ }
- ConfigGroupResponse configGroupResponse = new ConfigGroupResponse(
+ ConfigGroupResponse configGroupResponse = new ConfigGroupResponse(
configGroupEntity.getGroupId(), cluster.getClusterName(),
configGroupEntity.getGroupName(), configGroupEntity.getTag(),
- configGroupEntity.getDescription(),
- hostnames, configObjMap);
- return configGroupResponse;
- } finally {
- readWriteLock.readLock().unlock();
- }
+ configGroupEntity.getDescription(), hostnames, configObjMap);
+ return configGroupResponse;
} finally {
- cluster.getClusterGlobalLock().readLock().unlock();
+ readWriteLock.readLock().unlock();
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index 3b5ed28..7e345e5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -30,6 +30,8 @@ import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.agent.AlertDefinitionCommand;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -92,7 +94,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
private static final Logger LOG =
LoggerFactory.getLogger(ServiceComponentHostImpl.class);
- private final ReadWriteLock clusterGlobalLock;
private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
private final Lock readLock = readWriteLock.readLock();
private final Lock writeLock = readWriteLock.writeLock();
@@ -751,7 +752,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
}
this.serviceComponent = serviceComponent;
- clusterGlobalLock = serviceComponent.getClusterGlobalLock();
HostEntity hostEntity = null;
try {
@@ -805,7 +805,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
Injector injector) {
injector.injectMembers(this);
this.serviceComponent = serviceComponent;
- clusterGlobalLock = serviceComponent.getClusterGlobalLock();
this.desiredStateEntity = desiredStateEntity;
this.stateEntity = stateEntity;
@@ -1029,6 +1028,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void handleEvent(ServiceComponentHostEvent event)
throws InvalidStateTransitionException {
if (LOG.isDebugEnabled()) {
@@ -1037,30 +1037,25 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
+ ", event=" + event.toString());
}
State oldState = getState();
- clusterGlobalLock.readLock().lock();
try {
+ writeLock.lock();
try {
- writeLock.lock();
- try {
- stateMachine.doTransition(event.getType(), event);
- getStateEntity().setCurrentState(stateMachine.getCurrentState());
- saveComponentStateEntityIfPersisted();
- // TODO Audit logs
- } catch (InvalidStateTransitionException e) {
- LOG.error("Can't handle ServiceComponentHostEvent event at"
- + " current state"
- + ", serviceComponentName=" + getServiceComponentName()
- + ", hostName=" + getHostName()
- + ", currentState=" + oldState
- + ", eventType=" + event.getType()
- + ", event=" + event);
- throw e;
- }
- } finally {
- writeLock.unlock();
+ stateMachine.doTransition(event.getType(), event);
+ getStateEntity().setCurrentState(stateMachine.getCurrentState());
+ saveComponentStateEntityIfPersisted();
+ // TODO Audit logs
+ } catch (InvalidStateTransitionException e) {
+ LOG.error("Can't handle ServiceComponentHostEvent event at"
+ + " current state"
+ + ", serviceComponentName=" + getServiceComponentName()
+ + ", hostName=" + getHostName()
+ + ", currentState=" + oldState
+ + ", eventType=" + event.getType()
+ + ", event=" + event);
+ throw e;
}
} finally {
- clusterGlobalLock.readLock().unlock();
+ writeLock.unlock();
}
if (!oldState.equals(getState())) {
@@ -1349,58 +1344,56 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
}
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public ServiceComponentHostResponse convertToResponse(Map<String, DesiredConfig> desiredConfigs) {
- clusterGlobalLock.readLock().lock();
+ readLock.lock();
try {
- readLock.lock();
- try {
- HostComponentStateEntity hostComponentStateEntity = getStateEntity();
- if (null == hostComponentStateEntity) {
- LOG.warn("Could not convert ServiceComponentHostResponse to a response. It's possible that Host " + getHostName() + " was deleted.");
- return null;
- }
-
- String clusterName = serviceComponent.getClusterName();
- String serviceName = serviceComponent.getServiceName();
- String serviceComponentName = serviceComponent.getName();
- String hostName = getHostName();
- String state = getState().toString();
- String stackId = getStackVersion().getStackId();
- String desiredState = getDesiredState().toString();
- String desiredStackId = getDesiredStackVersion().getStackId();
- HostComponentAdminState componentAdminState = getComponentAdminState();
- UpgradeState upgradeState = hostComponentStateEntity.getUpgradeState();
-
- String displayName = null;
- try {
- ComponentInfo compInfo = ambariMetaInfo.getComponent(getStackVersion().getStackName(),
- getStackVersion().getStackVersion(), serviceName, serviceComponentName);
- displayName = compInfo.getDisplayName();
- } catch (AmbariException e) {
- displayName = serviceComponentName;
- }
+ HostComponentStateEntity hostComponentStateEntity = getStateEntity();
+ if (null == hostComponentStateEntity) {
+ LOG.warn(
+ "Could not convert ServiceComponentHostResponse to a response. It's possible that Host {} was deleted.",
+ getHostName());
+ return null;
+ }
- ServiceComponentHostResponse r = new ServiceComponentHostResponse(
- clusterName, serviceName,
- serviceComponentName, displayName, hostName, state,
- stackId, desiredState,
- desiredStackId, componentAdminState);
+ String clusterName = serviceComponent.getClusterName();
+ String serviceName = serviceComponent.getServiceName();
+ String serviceComponentName = serviceComponent.getName();
+ String hostName = getHostName();
+ String state = getState().toString();
+ String stackId = getStackVersion().getStackId();
+ String desiredState = getDesiredState().toString();
+ String desiredStackId = getDesiredStackVersion().getStackId();
+ HostComponentAdminState componentAdminState = getComponentAdminState();
+ UpgradeState upgradeState = hostComponentStateEntity.getUpgradeState();
+
+ String displayName = null;
+ try {
+ ComponentInfo compInfo = ambariMetaInfo.getComponent(getStackVersion().getStackName(),
+ getStackVersion().getStackVersion(), serviceName, serviceComponentName);
+ displayName = compInfo.getDisplayName();
+ } catch (AmbariException e) {
+ displayName = serviceComponentName;
+ }
- r.setActualConfigs(actualConfigs);
- r.setUpgradeState(upgradeState);
+ ServiceComponentHostResponse r = new ServiceComponentHostResponse(
+ clusterName, serviceName,
+ serviceComponentName, displayName, hostName, state,
+ stackId, desiredState,
+ desiredStackId, componentAdminState);
- try {
- r.setStaleConfig(helper.isStaleConfigs(this, desiredConfigs));
- } catch (Exception e) {
- LOG.error("Could not determine stale config", e);
- }
+ r.setActualConfigs(actualConfigs);
+ r.setUpgradeState(upgradeState);
- return r;
- } finally {
- readLock.unlock();
+ try {
+ r.setStaleConfig(helper.isStaleConfigs(this, desiredConfigs));
+ } catch (Exception e) {
+ LOG.error("Could not determine stale config", e);
}
+
+ return r;
} finally {
- clusterGlobalLock.readLock().unlock();
+ readLock.unlock();
}
}
@@ -1448,52 +1441,29 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
*/
@Override
public void persist() {
- boolean clusterWriteLockAcquired = false;
- if (!persisted) {
- clusterGlobalLock.writeLock().lock();
- clusterWriteLockAcquired = true;
- }
-
+ writeLock.lock();
try {
- writeLock.lock();
- try {
- if (!persisted) {
- // persist the new cluster topology and then release the cluster lock
- // as it has no more bearing on the rest of this persist() method
- persistEntities();
- persisted = true;
-
- clusterGlobalLock.writeLock().unlock();
- clusterWriteLockAcquired = false;
-
- // these should still be done with the internal lock
- refresh();
- // There refresh calls are no longer needed with cached references
- // not used on getters/setters
- // NOTE: Refreshing parents is a bad pattern.
- //host.refresh();
- //serviceComponent.refresh();
-
- // publish the service component installed event
- StackId stackId = getDesiredStackVersion();
-
- ServiceComponentInstalledEvent event = new ServiceComponentInstalledEvent(
- getClusterId(), stackId.getStackName(),
- stackId.getStackVersion(), getServiceName(), getServiceComponentName(), getHostName(),
- isRecoveryEnabled());
-
- eventPublisher.publish(event);
- } else {
- saveComponentStateEntityIfPersisted();
- saveComponentDesiredStateEntityIfPersisted();
- }
- } finally {
- writeLock.unlock();
+ if (!persisted) {
+ // persist the new cluster topology
+ persistEntities();
+ persisted = true;
+
+ refresh();
+
+ // publish the service component installed event
+ StackId stackId = getDesiredStackVersion();
+
+ ServiceComponentInstalledEvent event = new ServiceComponentInstalledEvent(getClusterId(),
+ stackId.getStackName(), stackId.getStackVersion(), getServiceName(),
+ getServiceComponentName(), getHostName(), isRecoveryEnabled());
+
+ eventPublisher.publish(event);
+ } else {
+ saveComponentStateEntityIfPersisted();
+ saveComponentDesiredStateEntityIfPersisted();
}
} finally {
- if (clusterWriteLockAcquired) {
- clusterGlobalLock.writeLock().unlock();
- }
+ writeLock.unlock();
}
}
@@ -1568,8 +1538,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public boolean canBeRemoved() {
- clusterGlobalLock.readLock().lock();
boolean schLockAcquired = false;
try {
// if unable to read, then writers are writing; cannot remove SCH
@@ -1581,38 +1551,33 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
if (schLockAcquired) {
readLock.unlock();
}
- clusterGlobalLock.readLock().unlock();
}
}
@Override
+ @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void delete() {
boolean fireRemovalEvent = false;
- clusterGlobalLock.writeLock().lock();
+ writeLock.lock();
try {
- writeLock.lock();
- try {
- if (persisted) {
- removeEntities();
-
- // host must be re-loaded from db to refresh the cached JPA HostEntity
- // that references HostComponentDesiredStateEntity
- // and HostComponentStateEntity JPA entities
- host.refresh();
+ if (persisted) {
+ removeEntities();
- persisted = false;
- fireRemovalEvent = true;
- }
+ // host must be re-loaded from db to refresh the cached JPA HostEntity
+ // that references HostComponentDesiredStateEntity
+ // and HostComponentStateEntity JPA entities
+ host.refresh();
- clusters.getCluster(getClusterName()).removeServiceComponentHost(this);
- } catch (AmbariException ex) {
- LOG.error("Unable to remove a service component from a host", ex);
- } finally {
- writeLock.unlock();
+ persisted = false;
+ fireRemovalEvent = true;
}
+
+ clusters.getCluster(getClusterName()).removeServiceComponentHost(this);
+ } catch (AmbariException ex) {
+ LOG.error("Unable to remove a service component from a host", ex);
} finally {
- clusterGlobalLock.writeLock().unlock();
+ writeLock.unlock();
}
// publish event for the removal of the SCH after the removal is
http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
index 387205d..f9dd5d1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
@@ -18,12 +18,18 @@
package org.apache.ambari.server.update;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonPrimitive;
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import junit.framework.Assert;
+import static org.easymock.EasyMock.anyString;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.persistence.EntityManager;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.configuration.Configuration;
import org.apache.ambari.server.controller.AmbariManagementController;
@@ -51,18 +57,13 @@ import org.easymock.EasyMock;
import org.easymock.EasyMockSupport;
import org.junit.Test;
-import javax.persistence.EntityManager;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReadWriteLock;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonPrimitive;
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
+import junit.framework.Assert;
public class HostUpdateHelperTest {
@@ -217,8 +218,6 @@ public class HostUpdateHelperTest {
ClusterConfigEntity mockClusterConfigEntity3 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
ClusterConfigEntity mockClusterConfigEntity4 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
StackEntity mockStackEntity = easyMockSupport.createNiceMock(StackEntity.class);
- ReadWriteLock mockReadWriteLock = easyMockSupport.createNiceMock(ReadWriteLock.class);
- Lock mockLock = easyMockSupport.createNiceMock(Lock.class);
Map<String, Map<String, String>> clusterHostsToChange = new HashMap<>();
Map<String, String> hosts = new HashMap<>();
List<ClusterConfigEntity> clusterConfigEntities1 = new ArrayList<>();
@@ -254,11 +253,8 @@ public class HostUpdateHelperTest {
expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
expect(mockClusters.getCluster("cl1")).andReturn(mockCluster).once();
- expect(mockCluster.getClusterGlobalLock()).andReturn(mockReadWriteLock).atLeastOnce();
expect(mockCluster.getClusterId()).andReturn(1L).atLeastOnce();
- expect(mockReadWriteLock.writeLock()).andReturn(mockLock).atLeastOnce();
-
expect(mockClusterEntity1.getClusterConfigEntities()).andReturn(clusterConfigEntities1).atLeastOnce();
expect(mockClusterEntity2.getClusterConfigEntities()).andReturn(clusterConfigEntities2).atLeastOnce();
[04/32] ambari git commit: AMBARI-18495 - Remove Unnecessary Locks
Inside Of Cluster Business Object Implementations (jonathanhurley)
Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index 37a48f0..caa06e5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@ -23,7 +23,6 @@ import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
-import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -32,11 +31,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
-import java.util.Random;
import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
import javax.persistence.EntityManager;
@@ -56,12 +51,10 @@ import org.apache.ambari.server.security.SecurityHelper;
import org.apache.ambari.server.security.TestAuthenticationFactory;
import org.apache.ambari.server.stack.StackManagerFactory;
import org.apache.ambari.server.state.cluster.ClusterFactory;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
import org.apache.ambari.server.state.configgroup.ConfigGroup;
import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
import org.apache.ambari.server.state.host.HostFactory;
import org.apache.ambari.server.state.stack.OsFamily;
-import org.apache.ambari.server.utils.SynchronousThreadPoolExecutor;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -841,7 +834,7 @@ public class ConfigHelperTest {
bind(AmbariManagementController.class).toInstance(createNiceMock(AmbariManagementController.class));
bind(AmbariMetaInfo.class).toInstance(mockMetaInfo);
bind(RequestFactory.class).toInstance(createNiceMock(RequestFactory.class));
- bind(Clusters.class).toInstance(createNiceMock(ClustersImpl.class));
+ bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
bind(ClusterController.class).toInstance(clusterController);
bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
bind(HostRoleCommandDAO.class).toInstance(createNiceMock(HostRoleCommandDAO.class));
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/state/RequestExecutionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/RequestExecutionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/RequestExecutionTest.java
index a5b0e5a5..c6c37c5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/RequestExecutionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/RequestExecutionTest.java
@@ -21,8 +21,6 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
-import junit.framework.Assert;
-
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.controller.RequestScheduleResponse;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
@@ -46,6 +44,8 @@ import com.google.inject.Injector;
import com.google.inject.persist.PersistService;
import com.google.inject.persist.Transactional;
+import junit.framework.Assert;
+
public class RequestExecutionTest {
private Injector injector;
private Clusters clusters;
@@ -120,7 +120,7 @@ public class RequestExecutionTest {
requestExecution.setDescription("Test Schedule");
requestExecution.persist();
-
+ cluster.addRequestExecution(requestExecution);
return requestExecution;
}
@@ -270,8 +270,7 @@ public class RequestExecutionTest {
Assert.assertNotNull(requestExecution);
Long id = requestExecution.getId();
-
- requestExecution.delete();
+ cluster.deleteRequestExecution(id);
Assert.assertNull(requestScheduleDAO.findById(id));
Assert.assertNull(cluster.getAllRequestExecutions().get(id));
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
index ec01b80..2ed5a2d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
@@ -18,6 +18,21 @@
package org.apache.ambari.server.state.cluster;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.ServiceComponentNotFoundException;
+import org.apache.ambari.server.ServiceNotFoundException;
+import org.apache.ambari.server.events.listeners.upgrade.HostVersionOutOfSyncListener;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
@@ -34,22 +49,6 @@ import org.apache.ambari.server.state.ServiceFactory;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.State;
import org.apache.ambari.server.testing.DeadlockWarningThread;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ServiceComponentNotFoundException;
-import org.apache.ambari.server.ServiceNotFoundException;
-import org.apache.ambari.server.events.listeners.upgrade.HostVersionOutOfSyncListener;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.OrmTestHelper;
import org.easymock.EasyMock;
import org.junit.After;
import org.junit.Assert;
@@ -254,10 +253,6 @@ public class ClusterDeadlockTest {
"DATANODE", hostName));
}
- // !!! needed to populate some maps; without this, the cluster report
- // won't do anything and this test will be worthless
- ((ClusterImpl) cluster).loadServiceHostComponents();
-
List<Thread> threads = new ArrayList<Thread>();
for (int i = 0; i < NUMBER_OF_THREADS; i++) {
ClusterReaderThread clusterReaderThread = new ClusterReaderThread();
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java
index 2aaa2cf..f40e0a1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java
@@ -39,7 +39,9 @@ import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
import org.apache.ambari.server.orm.entities.ClusterEntity;
import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.RequestScheduleEntity;
import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.ambari.server.orm.entities.UpgradeEntity;
import org.apache.ambari.server.scheduler.ExecutionScheduler;
@@ -75,6 +77,7 @@ import com.google.inject.Binder;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;
+import com.google.inject.assistedinject.FactoryModuleBuilder;
import junit.framework.Assert;
@@ -99,11 +102,10 @@ public class ClusterEffectiveVersionTest extends EasyMockSupport {
expectClusterEntityMocks();
- AmbariEventPublisher eventPublisher = createNiceMock(AmbariEventPublisher.class);
-
replayAll();
- m_cluster = new ClusterImpl(m_clusterEntity, m_injector, eventPublisher);
+ ClusterFactory clusterFactory = m_injector.getInstance(ClusterFactory.class);
+ m_cluster = clusterFactory.create(m_clusterEntity);
verifyAll();
}
@@ -227,6 +229,12 @@ public class ClusterEffectiveVersionTest extends EasyMockSupport {
new ArrayList<ClusterServiceEntity>()).anyTimes();
EasyMock.expect(m_clusterEntity.getClusterConfigEntities()).andReturn(
new ArrayList<ClusterConfigEntity>()).anyTimes();
+
+ EasyMock.expect(m_clusterEntity.getConfigGroupEntities()).andReturn(
+ new ArrayList<ConfigGroupEntity>()).anyTimes();
+
+ EasyMock.expect(m_clusterEntity.getRequestScheduleEntities()).andReturn(
+ new ArrayList<RequestScheduleEntity>()).anyTimes();
}
/**
@@ -262,13 +270,16 @@ public class ClusterEffectiveVersionTest extends EasyMockSupport {
binder.bind(PasswordEncoder.class).toInstance(EasyMock.createNiceMock(PasswordEncoder.class));
binder.bind(KerberosHelper.class).toInstance(EasyMock.createNiceMock(KerberosHelper.class));
binder.bind(Users.class).toInstance(EasyMock.createNiceMock(Users.class));
+ binder.bind(AmbariEventPublisher.class).toInstance(createNiceMock(AmbariEventPublisher.class));
+ binder.install(new FactoryModuleBuilder().implement(
+ Cluster.class, ClusterImpl.class).build(ClusterFactory.class));
try {
AmbariMetaInfo ambariMetaInfo = EasyMock.createNiceMock(AmbariMetaInfo.class);
EasyMock.expect(
ambariMetaInfo.getServices(EasyMock.anyString(), EasyMock.anyString())).andReturn(
- new HashMap<String, ServiceInfo>());
+ new HashMap<String, ServiceInfo>()).anyTimes();
EasyMock.replay(ambariMetaInfo);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index b99277d..6005ab5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -79,11 +79,8 @@ import org.apache.ambari.server.orm.entities.HostEntity;
import org.apache.ambari.server.orm.entities.HostStateEntity;
import org.apache.ambari.server.orm.entities.HostVersionEntity;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
-import org.apache.ambari.server.orm.entities.ResourceEntity;
-import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.security.authorization.ResourceType;
import org.apache.ambari.server.state.AgentVersion;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
@@ -232,41 +229,26 @@ public class ClusterTest {
String clusterName = "c1";
- ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
- if (resourceTypeEntity == null) {
- resourceTypeEntity = new ResourceTypeEntity();
- resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
- resourceTypeEntity.setName(ResourceType.CLUSTER.name());
- resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
- }
- ResourceEntity resourceEntity = new ResourceEntity();
- resourceEntity.setResourceType(resourceTypeEntity);
-
- ClusterEntity clusterEntity = new ClusterEntity();
- clusterEntity.setClusterName(clusterName);
- clusterEntity.setResource(resourceEntity);
- clusterEntity.setDesiredStack(stackEntity);
- clusterDAO.create(clusterEntity);
+ clusters.addCluster(clusterName, stackId);
Map<String, String> hostAttributes = new HashMap<String, String>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "5.9");
- List<HostEntity> hostEntities = new ArrayList<HostEntity>();
Set<String> hostNames = new HashSet<String>() {{ add("h1"); add("h2"); }};
for (String hostName : hostNames) {
- HostEntity hostEntity = new HostEntity();
- hostEntity.setHostName(hostName);
+ clusters.addHost(hostName);
+ Host host = clusters.getHost(hostName);
+ host.persist();
+
+ HostEntity hostEntity = hostDAO.findByName(hostName);
hostEntity.setIpv4("ipv4");
hostEntity.setIpv6("ipv6");
hostEntity.setHostAttributes(gson.toJson(hostAttributes));
- hostEntity.setClusterEntities(Arrays.asList(clusterEntity));
- hostEntities.add(hostEntity);
- hostDAO.create(hostEntity);
+ hostDAO.merge(hostEntity);
}
- clusterEntity.setHostEntities(hostEntities);
- clusterDAO.merge(clusterEntity);
+ clusters.mapHostsToCluster(hostNames, clusterName);
c1 = clusters.getCluster(clusterName);
helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
index 5d39841..29f40fb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
@@ -73,7 +73,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.ServiceComponentHostFactory;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.cluster.ClusterFactory;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
import org.apache.ambari.server.state.host.HostFactory;
import org.apache.ambari.server.state.stack.OsFamily;
import org.apache.ambari.server.topology.PersistedState;
@@ -117,7 +116,7 @@ public class StageUtilsTest extends EasyMockSupport {
bind(CredentialStoreService.class).toInstance(createNiceMock(CredentialStoreService.class));
bind(TopologyManager.class).toInstance(createNiceMock(TopologyManager.class));
bind(AmbariMetaInfo.class).toInstance(createMock(AmbariMetaInfo.class));
- bind(Clusters.class).toInstance(createNiceMock(ClustersImpl.class));
+ bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
bind(ServiceComponentHostFactory.class).toInstance(createNiceMock(ServiceComponentHostFactory.class));
bind(StageFactory.class).to(StageFactoryImpl.class);
[26/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5dff9acd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5dff9acd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5dff9acd
Branch: refs/heads/trunk
Commit: 5dff9acdc388d276129fcca80c493f8d56ef58e2
Parents: d6a8471 e8544ba
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Sun Oct 16 09:29:21 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Sun Oct 16 09:29:21 2016 -0400
----------------------------------------------------------------------
LICENSE.txt | 4 +-
.../PreUpgradeCheckResourceProvider.java | 7 +-
.../0.96.0.2.0/package/scripts/params_linux.py | 4 +-
.../HIVE/configuration/hive-interactive-env.xml | 2 +-
.../PreUpgradeCheckResourceProviderTest.java | 4 +-
.../timeline/AMSPropertyProviderTest.java | 95 +-
.../ambari/server/state/ConfigHelperTest.java | 139 +-
.../server/upgrade/UpgradeCatalog220Test.java | 20 +-
ambari-web/app/app.js | 5 +
.../fonts/glyphicons-halflings-regular.eot | Bin 0 -> 20127 bytes
.../fonts/glyphicons-halflings-regular.svg | 288 +
.../fonts/glyphicons-halflings-regular.ttf | Bin 0 -> 45404 bytes
.../fonts/glyphicons-halflings-regular.woff | Bin 0 -> 23424 bytes
.../fonts/glyphicons-halflings-regular.woff2 | Bin 0 -> 18028 bytes
ambari-web/app/config.js | 8 +-
ambari-web/app/controllers/application.js | 63 +-
ambari-web/app/controllers/installer.js | 11 +
.../main/admin/service_auto_start.js | 6 +-
.../main/admin/stack_and_upgrade_controller.js | 24 +-
.../admin/stack_upgrade_history_controller.js | 2 +-
.../add_alert_definition/step1_controller.js | 14 +-
.../alert_definitions_actions_controller.js | 14 +-
.../main/alerts/alert_instances_controller.js | 3 +-
.../alerts/manage_alert_groups_controller.js | 4 +-
.../app/controllers/main/charts/heatmap.js | 6 +-
.../controllers/main/service/info/summary.js | 3 +-
.../service/manage_config_groups_controller.js | 3 +-
.../app/controllers/wizard/step10_controller.js | 4 +-
.../app/controllers/wizard/step1_controller.js | 2 +-
.../wizard/step7/assign_master_controller.js | 3 +-
ambari-web/app/data/host/categories.js | 4 +-
ambari-web/app/messages.js | 26 +-
.../app/mixins/common/configs/configs_saver.js | 1 +
.../app/mixins/common/widgets/widget_mixin.js | 12 +-
.../mixins/main/dashboard/widgets/editable.js | 3 +-
.../dashboard/widgets/editable_with_limit.js | 3 +-
.../widgets/single_numeric_threshold.js | 3 +-
.../details/host_components/decommissionable.js | 7 +-
.../main/service/configs/config_overridable.js | 3 +-
.../service/configs/widget_popover_support.js | 5 +-
.../app/mixins/wizard/wizardProgressPageView.js | 12 +-
ambari-web/app/models/alerts/alert_config.js | 14 +-
.../app/models/alerts/alert_definition.js | 16 +-
ambari-web/app/models/alerts/alert_instance.js | 2 +-
ambari-web/app/models/authentication.js | 2 +-
ambari-web/app/models/host_component.js | 48 +-
ambari-web/app/models/hosts.js | 18 +-
ambari-web/app/models/repository.js | 6 +-
ambari-web/app/router.js | 5 +-
.../app/routes/activate_hawq_standby_routes.js | 3 +-
.../app/routes/add_alert_definition_routes.js | 3 +-
.../app/routes/add_hawq_standby_routes.js | 3 +-
ambari-web/app/routes/add_host_routes.js | 3 +-
ambari-web/app/routes/add_kerberos_routes.js | 3 +-
ambari-web/app/routes/add_service_routes.js | 3 +-
ambari-web/app/routes/create_widget.js | 3 +-
ambari-web/app/routes/edit_widget.js | 3 +-
.../app/routes/high_availability_routes.js | 3 +-
ambari-web/app/routes/main.js | 3 +-
.../app/routes/ra_high_availability_routes.js | 3 +-
ambari-web/app/routes/reassign_master_routes.js | 3 +-
.../app/routes/remove_hawq_standby_routes.js | 3 +-
.../app/routes/rm_high_availability_routes.js | 3 +-
ambari-web/app/routes/rollbackHA_routes.js | 3 +-
ambari-web/app/routes/stack_upgrade_routes.js | 3 +-
ambari-web/app/styles/alerts.less | 142 +-
ambari-web/app/styles/application.less | 1673 ++-
ambari-web/app/styles/bootstrap_overrides.less | 72 +
ambari-web/app/styles/common.less | 106 +-
ambari-web/app/styles/config_history_flow.less | 71 +-
.../app/styles/enhanced_service_dashboard.less | 75 +-
ambari-web/app/styles/log_file_search.less | 4 +-
ambari-web/app/styles/modal_popups.less | 352 +-
ambari-web/app/styles/stack_versions.less | 160 +-
.../app/styles/theme/bootstrap-ambari.css | 541 +
ambari-web/app/styles/widgets.less | 28 +-
ambari-web/app/templates/application.hbs | 205 +-
.../common/assign_master_components.hbs | 46 +-
.../app/templates/common/chart/linear_time.hbs | 4 +-
.../common/configs/addPropertyWindow.hbs | 64 +-
.../common/configs/compare_property.hbs | 28 +-
.../configs/config_history_dropdown_row.hbs | 10 +-
.../common/configs/config_history_flow.hbs | 46 +-
.../app/templates/common/configs/controls.hbs | 18 +-
.../common/configs/notifications_configs.hbs | 144 +-
.../common/configs/overriddenProperty.hbs | 74 +-
.../templates/common/configs/overrideWindow.hbs | 35 +-
.../configs/propertyDependence_footer.hbs | 2 +-
.../common/configs/queuePopup_body.hbs | 8 +-
.../common/configs/queuePopup_footer.hbs | 2 +-
.../common/configs/save_configuration.hbs | 10 +-
.../common/configs/selectCreateConfigGroup.hbs | 85 +-
.../templates/common/configs/service_config.hbs | 34 +-
.../common/configs/service_config_category.hbs | 52 +-
.../common/configs/service_config_wizard.hbs | 20 +-
.../common/configs/service_version_box.hbs | 10 +-
.../common/configs/services_config.hbs | 6 +-
.../configs/widgets/combo_config_widget.hbs | 6 +-
.../comparison/config_widget_comparison.hbs | 4 +-
.../common/configs/widgets/controls.hbs | 20 +-
.../configs/widgets/list_config_widget.hbs | 2 +-
.../overrides/config_widget_override.hbs | 2 +-
.../configs/widgets/plain_config_text_field.hbs | 6 +-
.../configs/widgets/radio_button_config.hbs | 10 +-
.../widgets/service_config_password_field.hbs | 26 +
.../widgets/test_db_connection_widget.hbs | 12 +-
.../app/templates/common/custom_date_popup.hbs | 34 +-
.../app/templates/common/editable_list.hbs | 6 +-
.../templates/common/filter_combo_cleanable.hbs | 47 +-
.../app/templates/common/filter_combobox.hbs | 12 +-
.../common/form/check_db_connection.hbs | 24 +-
.../app/templates/common/form/checkbox.hbs | 12 +-
ambari-web/app/templates/common/form/field.hbs | 6 +-
.../common/form/manage_credentilas_form.hbs | 8 +-
.../app/templates/common/form/spinner_input.hbs | 16 +-
ambari-web/app/templates/common/grid/filter.hbs | 10 +-
ambari-web/app/templates/common/grid/header.hbs | 2 +-
.../templates/common/host_progress_popup.hbs | 70 +-
.../common/host_progress_popup_footer.hbs | 10 +-
.../app/templates/common/log_file_search.hbs | 12 +-
ambari-web/app/templates/common/log_tail.hbs | 4 +-
ambari-web/app/templates/common/metric.hbs | 32 +-
ambari-web/app/templates/common/modal_popup.hbs | 85 +-
.../common/modal_popups/alerts_popup.hbs | 7 +-
.../modal_popups/cluster_check_dialog.hbs | 8 +-
.../modal_popups/confirmation_feedback.hbs | 10 +-
.../modal_popups/dependent_configs_list.hbs | 4 +-
.../modal_popups/hosts_table_list_popup.hbs | 2 +-
.../common/modal_popups/invalid_KDC_popup.hbs | 12 +-
.../common/modal_popups/log_tail_popup.hbs | 6 +-
.../common/modal_popups/logs_popup.hbs | 4 +-
.../common/modal_popups/prompt_popup.hbs | 12 +-
.../modal_popups/widget_browser_footer.hbs | 13 +-
.../modal_popups/widget_browser_popup.hbs | 14 +-
ambari-web/app/templates/common/progress.hbs | 18 +-
.../templates/common/rolling_restart_view.hbs | 12 +-
.../app/templates/common/selectable_popup.hbs | 4 +-
ambari-web/app/templates/common/settings.hbs | 12 +-
ambari-web/app/templates/common/time_range.hbs | 2 +-
.../templates/common/widget/gauge_widget.hbs | 12 +-
.../templates/common/widget/graph_widget.hbs | 14 +-
.../templates/common/widget/heatmap_widget.hbs | 6 +-
.../templates/common/widget/number_widget.hbs | 12 +-
.../templates/common/widget/template_widget.hbs | 12 +-
ambari-web/app/templates/experimental.hbs | 12 +-
ambari-web/app/templates/installer.hbs | 8 +-
ambari-web/app/templates/login.hbs | 20 +-
ambari-web/app/templates/main.hbs | 4 +-
ambari-web/app/templates/main/admin.hbs | 8 +-
.../hawq/activateStandby/step2.hbs | 6 +-
.../hawq/activateStandby/wizard.hbs | 8 +-
.../highAvailability/hawq/addStandby/step3.hbs | 4 +-
.../highAvailability/hawq/addStandby/wizard.hbs | 8 +-
.../hawq/removeStandby/step2.hbs | 4 +-
.../hawq/removeStandby/wizard.hbs | 8 +-
.../nameNode/rollbackHA/rollback_wizard.hbs | 8 +-
.../admin/highAvailability/nameNode/step1.hbs | 10 +-
.../admin/highAvailability/nameNode/step3.hbs | 18 +-
.../admin/highAvailability/nameNode/step4.hbs | 2 +-
.../admin/highAvailability/nameNode/wizard.hbs | 8 +-
.../highAvailability/rangerAdmin/step1.hbs | 10 +-
.../highAvailability/rangerAdmin/step3.hbs | 4 +-
.../highAvailability/rangerAdmin/wizard.hbs | 8 +-
.../highAvailability/resourceManager/step3.hbs | 4 +-
.../highAvailability/resourceManager/wizard.hbs | 8 +-
.../app/templates/main/admin/kerberos.hbs | 4 +-
.../templates/main/admin/kerberos/disable.hbs | 2 +-
.../kerberos/notify_security_off_popup.hbs | 2 +-
.../app/templates/main/admin/kerberos/step1.hbs | 14 +-
.../app/templates/main/admin/kerberos/step2.hbs | 2 +-
.../app/templates/main/admin/kerberos/step3.hbs | 18 +-
.../app/templates/main/admin/kerberos/step4.hbs | 2 +-
.../app/templates/main/admin/kerberos/step5.hbs | 4 +-
.../templates/main/admin/kerberos/wizard.hbs | 10 +-
.../templates/main/admin/serviceAccounts.hbs | 2 +-
.../templates/main/admin/service_auto_start.hbs | 32 +-
.../admin/stack_upgrade/edit_repositories.hbs | 26 +-
.../admin/stack_upgrade/failed_hosts_modal.hbs | 32 +-
.../main/admin/stack_upgrade/services.hbs | 6 +-
.../stack_upgrade/stack_upgrade_wizard.hbs | 56 +-
.../upgrade_configs_merge_table.hbs | 2 +-
.../main/admin/stack_upgrade/upgrade_group.hbs | 20 +-
.../admin/stack_upgrade/upgrade_history.hbs | 9 +-
.../stack_upgrade/upgrade_history_details.hbs | 8 +-
.../admin/stack_upgrade/upgrade_options.hbs | 20 +-
.../main/admin/stack_upgrade/upgrade_task.hbs | 33 +-
.../admin/stack_upgrade/upgrade_version_box.hbs | 14 +-
.../stack_upgrade/upgrade_version_column.hbs | 2 +-
.../main/admin/stack_upgrade/versions.hbs | 16 +-
ambari-web/app/templates/main/alerts.hbs | 11 +-
.../add_alert_definition.hbs | 8 +-
.../main/alerts/add_alert_definition/step2.hbs | 2 +-
.../main/alerts/add_alert_definition/step3.hbs | 4 +-
...ustom_config_to_alert_notification_popup.hbs | 16 +-
.../alerts/add_definition_to_group_popup.hbs | 129 +-
.../main/alerts/alert_definitions_actions.hbs | 2 +-
.../app/templates/main/alerts/configs.hbs | 50 +-
.../alerts/configs/alert_config_parameter.hbs | 19 +-
.../alerts/configs/alert_config_text_area.hbs | 20 +
.../alerts/configs/alert_config_text_field.hbs | 12 +-
.../alerts/configs/alert_config_threshold.hbs | 30 +-
.../main/alerts/create_alert_notification.hbs | 256 +-
.../main/alerts/create_new_alert_group.hbs | 14 +-
.../main/alerts/definition_details.hbs | 95 +-
.../main/alerts/manage_alert_groups_popup.hbs | 148 +-
.../alerts/manage_alert_notifications_popup.hbs | 72 +-
.../app/templates/main/charts/heatmap.hbs | 10 +-
.../templates/main/charts/heatmap_dropdown.hbs | 6 +-
.../app/templates/main/charts/linear_time.hbs | 4 +-
ambari-web/app/templates/main/dashboard.hbs | 6 +-
.../templates/main/dashboard/config_history.hbs | 9 +-
.../main/dashboard/edit_widget_popup.hbs | 28 +-
.../edit_widget_popup_single_threshold.hbs | 20 +-
.../main/dashboard/plus_button_filter.hbs | 12 +-
.../app/templates/main/dashboard/widgets.hbs | 50 +-
.../main/dashboard/widgets/cluster_metrics.hbs | 14 +-
.../main/dashboard/widgets/hbase_links.hbs | 14 +-
.../main/dashboard/widgets/hdfs_links.hbs | 14 +-
.../main/dashboard/widgets/pie_chart.hbs | 16 +-
.../main/dashboard/widgets/simple_text.hbs | 16 +-
.../templates/main/dashboard/widgets/uptime.hbs | 12 +-
.../main/dashboard/widgets/yarn_links.hbs | 14 +-
ambari-web/app/templates/main/host.hbs | 20 +-
ambari-web/app/templates/main/host/add.hbs | 11 +-
.../app/templates/main/host/addHost/step4.hbs | 15 +-
.../main/host/bulk_operation_confirm_popup.hbs | 2 +-
.../templates/main/host/bulk_operation_menu.hbs | 4 +-
.../templates/main/host/combo_search_box.hbs | 2 +-
ambari-web/app/templates/main/host/configs.hbs | 6 +-
.../app/templates/main/host/decommission.hbs | 9 +-
.../main/host/delete_hosts_dry_run_popup.hbs | 14 +-
.../templates/main/host/delete_hosts_popup.hbs | 8 +-
.../main/host/delete_hosts_result_popup.hbs | 14 +-
ambari-web/app/templates/main/host/details.hbs | 8 +-
.../main/host/details/deleteComponentPopup.hbs | 14 +-
.../main/host/details/doDeleteHostPopup.hbs | 18 +-
.../main/host/details/host_component.hbs | 14 +-
.../details/raiseDeleteComponentErrorPopup.hbs | 6 +-
.../app/templates/main/host/host_alerts.hbs | 11 +-
.../app/templates/main/host/log_metrics.hbs | 4 +-
ambari-web/app/templates/main/host/logs.hbs | 9 +-
.../app/templates/main/host/rack_id_popup.hbs | 6 +-
.../app/templates/main/host/stack_versions.hbs | 11 +-
ambari-web/app/templates/main/host/summary.hbs | 264 +-
ambari-web/app/templates/main/menu_item.hbs | 6 +-
ambari-web/app/templates/main/service.hbs | 6 +-
ambari-web/app/templates/main/service/add.hbs | 10 +-
.../main/service/all_services_actions.hbs | 14 +-
.../app/templates/main/service/info/configs.hbs | 6 +-
.../main/service/info/configs_save_popup.hbs | 4 +-
.../service/info/confirm_delete_service.hbs | 2 +-
.../main/service/info/heatmap_dropdown.hbs | 4 +-
.../flume/flume_agent_metrics_section.hbs | 26 +-
.../info/metrics/hive/hive_quick_links.hbs | 4 +-
.../main/service/info/save_popup_footer.hbs | 4 +-
.../main/service/info/service_alert_popup.hbs | 9 +-
.../app/templates/main/service/info/summary.hbs | 14 +-
ambari-web/app/templates/main/service/item.hbs | 10 +-
.../manage_configuration_groups_popup.hbs | 151 +-
.../app/templates/main/service/menu_item.hbs | 15 +-
.../templates/main/service/new_config_group.hbs | 30 +-
.../app/templates/main/service/reassign.hbs | 8 +-
.../templates/main/service/reassign/step3.hbs | 2 +-
.../app/templates/main/service/reconfigure.hbs | 4 +-
.../app/templates/main/service/service.hbs | 4 +-
.../templates/main/service/services/flume.hbs | 10 +-
.../templates/main/service/services/hbase.hbs | 2 +-
.../templates/main/service/services/hdfs.hbs | 2 +-
.../main/service/services/mapreduce2.hbs | 4 +-
.../templates/main/service/services/oozie.hbs | 4 +-
.../templates/main/service/services/storm.hbs | 2 +-
.../templates/main/service/services/yarn.hbs | 2 +-
.../main/service/services/zookeeper.hbs | 4 +-
.../main/service/widgets/create/expression.hbs | 39 +-
.../main/service/widgets/create/step1.hbs | 8 +-
.../main/service/widgets/create/step2.hbs | 10 +-
.../service/widgets/create/step2_add_metric.hbs | 6 +-
.../main/service/widgets/create/step2_graph.hbs | 4 +-
.../service/widgets/create/step2_template.hbs | 2 +-
.../main/service/widgets/create/step3.hbs | 38 +-
.../create/widget_property_threshold.hbs | 16 +-
.../main/service/widgets/create/wizard.hbs | 8 +-
.../app/templates/main/service/widgets/edit.hbs | 8 +-
ambari-web/app/templates/utils/ajax.hbs | 4 +-
...onfig_launch_switch_config_group_of_host.hbs | 11 +-
.../controls_service_config_radio_buttons.hbs | 10 +-
...trols_service_config_textfield_with_unit.hbs | 2 +-
ambari-web/app/templates/wizard/step0.hbs | 10 +-
ambari-web/app/templates/wizard/step1.hbs | 138 +-
.../app/templates/wizard/step1/vdf_upload.hbs | 14 +-
.../wizard/step1_addLocalRepository.hbs | 40 +-
ambari-web/app/templates/wizard/step2.hbs | 179 +-
ambari-web/app/templates/wizard/step3.hbs | 31 +-
.../step3/step3_host_warning_popup_footer.hbs | 10 +-
.../wizard/step3/step3_host_warnings_popup.hbs | 61 +-
ambari-web/app/templates/wizard/step4.hbs | 16 +-
.../step4/step4_ranger_requirements_popup.hbs | 7 +-
ambari-web/app/templates/wizard/step6.hbs | 33 +-
.../wizard/step6/step6_issues_popup.hbs | 2 +-
ambari-web/app/templates/wizard/step7.hbs | 4 +-
ambari-web/app/templates/wizard/step8.hbs | 2 +-
.../templates/wizard/step8/step8_log_popup.hbs | 2 +-
ambari-web/app/templates/wizard/step9.hbs | 29 +-
.../wizard/step9/step9HostTasksLogPopup.hbs | 8 +-
.../wizard/step9/step9_install_host_popup.hbs | 4 +-
ambari-web/app/utils/bootstrap_reopen.js | 42 +
ambari-web/app/utils/ember_reopen.js | 3 +-
ambari-web/app/utils/handlebars_helpers.js | 2 +-
ambari-web/app/utils/helper.js | 14 +-
ambari-web/app/utils/host_progress_popup.js | 57 +-
ambari-web/app/utils/hosts.js | 3 +-
ambari-web/app/utils/load_timer.js | 4 +-
.../common/assign_master_components_view.js | 5 +-
.../app/views/common/chart/linear_time.js | 10 +-
.../views/common/configs/config_history_flow.js | 6 +-
.../app/views/common/configs/controls_view.js | 2 +-
.../views/common/configs/service_config_view.js | 2 +-
.../configs/service_configs_by_category_view.js | 16 +-
.../widgets/checkbox_config_widget_view.js | 6 +-
.../configs/widgets/config_widget_view.js | 4 +-
.../widgets/directory_config_widget_view.js | 2 +-
.../configs/widgets/label_config_widget_view.js | 3 +-
.../widgets/string_config_widget_view.js | 2 +-
.../widgets/textfield_config_widget_view.js | 3 +-
ambari-web/app/views/common/controls_view.js | 79 +-
.../app/views/common/filter_combo_cleanable.js | 2 +-
ambari-web/app/views/common/filter_combobox.js | 2 +-
ambari-web/app/views/common/filter_view.js | 4 +-
.../views/common/helpers/status_icon_view.js | 30 +-
ambari-web/app/views/common/modal_popup.js | 16 +
.../config_validation_popup.js | 3 +-
.../dependent_configs_list_popup.js | 3 +-
.../modal_popups/log_file_search_popup.js | 3 +-
.../views/common/modal_popups/log_tail_popup.js | 3 +-
.../app/views/common/progress_bar_view.js | 16 +-
ambari-web/app/views/common/table_view.js | 93 +-
.../hawq/activateStandby/step3_view.js | 2 +-
.../hawq/addStandby/step4_view.js | 2 +-
.../hawq/removeStandby/step3_view.js | 2 +-
.../highAvailability/nameNode/step1_view.js | 2 +-
.../admin/highAvailability/progress_view.js | 12 +-
.../resourceManager/step4_view.js | 2 +-
.../views/main/admin/stack_and_upgrade_view.js | 3 +-
.../stack_upgrade/failed_hosts_modal_view.js | 4 +-
.../admin/stack_upgrade/upgrade_group_view.js | 2 +-
.../admin/stack_upgrade/upgrade_history_view.js | 3 +-
.../stack_upgrade/upgrade_version_box_view.js | 11 +-
.../upgrade_version_column_view.js | 6 +-
.../app/views/main/alert_definitions_view.js | 2 +-
.../main/alerts/definition_configs_view.js | 28 +-
.../main/alerts/definition_details_view.js | 2 +-
ambari-web/app/views/main/dashboard/widget.js | 3 +-
.../dashboard/widgets/cluster_metrics_widget.js | 4 +-
ambari-web/app/views/main/host.js | 1 +
ambari-web/app/views/main/host/details.js | 10 +-
.../main/host/details/host_component_view.js | 4 +-
.../views/main/host/hosts_table_menu_view.js | 2 -
.../app/views/main/host/stack_versions_view.js | 2 +-
ambari-web/app/views/main/menu.js | 38 +-
.../app/views/main/service/info/summary.js | 14 +-
ambari-web/app/views/main/service/item.js | 16 +-
.../views/main/service/reassign/step4_view.js | 2 +-
.../views/main/service/reassign/step6_view.js | 2 +-
.../views/main/service/reassign/step7_view.js | 2 +-
.../app/views/main/service/reconfigure.js | 8 +-
ambari-web/app/views/main/service/service.js | 2 +-
.../service/widgets/create/expression_view.js | 2 +-
ambari-web/app/views/wizard/step1_view.js | 3 +-
.../wizard/step3/hostWarningPopupFooter_view.js | 2 +-
ambari-web/app/views/wizard/step3_view.js | 6 +-
.../views/wizard/step9/hostLogPopupBody_view.js | 12 +-
ambari-web/app/views/wizard/step9_view.js | 26 +-
ambari-web/brunch-config.js | 17 +-
ambari-web/karma.conf.js | 4 +-
ambari-web/package.json | 3 +-
ambari-web/test/controllers/application_test.js | 12 -
ambari-web/test/controllers/installer_test.js | 6 +-
.../admin/stack_and_upgrade_controller_test.js | 6 +-
.../controllers/main/charts/heatmap_test.js | 12 +-
.../test/models/alerts/alert_definition_test.js | 14 +-
ambari-web/test/models/authentication_test.js | 4 +-
ambari-web/test/models/hosts_test.js | 16 +-
ambari-web/test/utils/helper_test.js | 4 +-
.../configs/widgets/config_widget_view_test.js | 2 +-
.../test/views/common/progress_bar_view_test.js | 16 +-
.../common/widget/gauge_widget_view_test.js | 3 +-
.../common/widget/graph_widget_view_test.js | 72 +-
.../highAvailability/progress_view_test.js | 12 +-
.../upgrade_version_box_view_test.js | 22 +-
.../host/details/host_component_view_test.js | 10 +-
.../decommissionable_test.js | 4 +-
ambari-web/test/views/main/menu_test.js | 96 +-
ambari-web/test/views/main/service/item_test.js | 100 +-
.../step3/hostWarningPopupFooter_view_test.js | 4 +-
ambari-web/test/views/wizard/step3_view_test.js | 10 +-
.../wizard/step9/hostLogPopupBody_view_test.js | 16 +-
ambari-web/test/views/wizard/step9_view_test.js | 28 +-
.../fonts/glyphicons-halflings-regular.eot | Bin 0 -> 20127 bytes
.../fonts/glyphicons-halflings-regular.svg | 288 +
.../fonts/glyphicons-halflings-regular.ttf | Bin 0 -> 45404 bytes
.../fonts/glyphicons-halflings-regular.woff | Bin 0 -> 23424 bytes
.../fonts/glyphicons-halflings-regular.woff2 | Bin 0 -> 18028 bytes
ambari-web/vendor/scripts/bootstrap-combobox.js | 560 +-
.../vendor/scripts/bootstrap-datepicker.js | 6 +
ambari-web/vendor/scripts/bootstrap.js | 3284 ++---
ambari-web/vendor/scripts/ember-latest.js | 2 +-
ambari-web/vendor/scripts/jquery-1.7.2.min.js | 4 -
ambari-web/vendor/scripts/jquery-1.9.1.js | 9597 +++++++++++++++
ambari-web/vendor/scripts/jquery-migrate.js | 752 ++
ambari-web/vendor/styles/bootstrap.css | 10534 +++++++++--------
.../theme/fonts/Roboto-Regular-webfont.eot | Bin 0 -> 79547 bytes
.../theme/fonts/Roboto-Regular-webfont.svg | 7606 ++++++++++++
.../theme/fonts/Roboto-Regular-webfont.ttf | Bin 0 -> 234464 bytes
.../theme/fonts/Roboto-Regular-webfont.woff | Bin 0 -> 105700 bytes
utility/pom.xml | 21 +
415 files changed, 31666 insertions(+), 11170 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/5dff9acd/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/5dff9acd/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index 56ae7ee,fd6ce1e..a802a83
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@@ -55,8 -61,11 +55,10 @@@ import org.apache.ambari.server.state.c
import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
import org.apache.ambari.server.state.host.HostFactory;
import org.apache.ambari.server.state.stack.OsFamily;
-import org.apache.ambari.server.utils.SynchronousThreadPoolExecutor;
import org.junit.After;
+ import org.junit.AfterClass;
import org.junit.Before;
+ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.runners.Enclosed;
import org.junit.runner.RunWith;
[02/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f0da4fa4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f0da4fa4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f0da4fa4
Branch: refs/heads/trunk
Commit: f0da4fa49ec8ce420ce4f495855af9b3e3b8d975
Parents: 561c6f2 aad2133
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Sep 28 10:17:25 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Sep 28 10:17:25 2016 -0400
----------------------------------------------------------------------
.../ambari-metrics/datasource.js | 64 +++
ambari-server/pom.xml | 36 ++
.../ambari/server/checks/CheckDescription.java | 5 +-
.../server/checks/UpgradeCheckRegistry.java | 76 +++
.../PreUpgradeCheckResourceProvider.java | 27 +-
.../server/stack/CommonServiceDirectory.java | 50 +-
.../ambari/server/stack/ServiceDirectory.java | 112 +++-
.../ambari/server/stack/ServiceModule.java | 8 +-
.../apache/ambari/server/stack/StackModule.java | 37 +-
.../server/stack/StackServiceDirectory.java | 81 +--
.../stack/UpdateActiveRepoVersionOnStartup.java | 9 +-
.../apache/ambari/server/state/ServiceInfo.java | 14 +
ambari-server/src/main/resources/alerts.json | 31 ++
.../HDF/grafana-kafka-home.json | 10 +-
.../HDF/grafana-storm-kafka-offset.json | 258 +++++++++
.../HDP/grafana-kafka-home.json | 10 +-
.../HDP/grafana-storm-kafka-offset.json | 258 +++++++++
.../2.1.0.2.0/package/scripts/params_linux.py | 7 +
.../YARN/2.1.0.2.0/package/scripts/yarn.py | 548 +++++++++----------
.../main/resources/host_scripts/alert_ulimit.py | 83 +++
.../scripts/shared_initialization.py | 2 +-
.../PreUpgradeCheckResourceProviderTest.java | 255 +++++++++
.../sample/checks/SampleServiceCheck.java | 52 ++
.../ambari/server/stack/ServiceModuleTest.java | 30 +
.../server/stack/StackManagerExtensionTest.java | 7 +
.../UpdateActiveRepoVersionOnStartupTest.java | 28 +-
.../test/python/host_scripts/TestAlertUlimit.py | 44 ++
.../stacks/2.0.6/YARN/test_historyserver.py | 1 -
.../stacks/2.0.6/YARN/test_resourcemanager.py | 54 +-
.../stacks/2.1/YARN/test_apptimelineserver.py | 40 +-
.../test/python/stacks/2.3/YARN/test_ats_1_5.py | 188 +++----
ambari-server/src/test/python/unitTests.py | 25 +-
ambari-web/app/assets/licenses/NOTICE.txt | 3 +
.../service/widgets/create/step2_controller.js | 26 +
ambari-web/app/messages.js | 13 +-
.../app/mixins/common/configs/configs_loader.js | 2 +-
ambari-web/app/models/cluster_states.js | 4 +-
ambari-web/app/routes/add_kerberos_routes.js | 4 +-
.../main/service/widgets/create/expression.hbs | 10 +-
.../main/service/widgets/create/step2_graph.hbs | 6 +-
.../service/widgets/create/step2_number.hbs | 6 +-
.../service/widgets/create/step2_template.hbs | 6 +-
ambari-web/app/utils/string_utils.js | 7 +-
.../views/common/configs/config_history_flow.js | 46 +-
.../app/views/common/rolling_restart_view.js | 19 +-
.../views/main/dashboard/widgets/hdfs_links.js | 2 +-
ambari-web/app/views/main/service/item.js | 3 +-
.../service/widgets/create/expression_view.js | 16 +-
ambari-web/brunch-config.js | 3 +-
.../resourceManager/wizard_controller_test.js | 1 -
.../common/configs/configs_loader_test.js | 10 +-
ambari-web/test/models/cluster_test.js | 12 +-
.../objects/service_config_property_test.js | 31 +-
.../configs/theme/sub_section_tab_test.js | 2 +-
.../test/views/main/host/log_metrics_test.js | 1 -
ambari-web/test/views/main/host_test.js | 4 +-
ambari-web/vendor/scripts/pluralize.js | 461 ++++++++++++++++
.../MICROSOFT_R/8.0.0/metainfo.xml | 4 +-
.../MICROSOFT_R/8.0.0/service_advisor.py | 22 +-
59 files changed, 2514 insertions(+), 660 deletions(-)
----------------------------------------------------------------------
[24/32] ambari git commit: AMBARI-18456 - Refactor Unnecessary
In-Memory Locks Around Business Objects (jonathanhurley)
Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
index 0f5c432..ff6dfc9 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
@@ -184,17 +184,11 @@ public class ServiceTest {
s.addServiceComponent(sc3);
- sc1.persist();
- sc2.persist();
- sc3.persist();
-
ServiceComponent sc4 = s.addServiceComponent("HDFS_CLIENT");
Assert.assertNotNull(s.getServiceComponent(sc4.getName()));
Assert.assertEquals(State.INIT,
s.getServiceComponent("HDFS_CLIENT").getDesiredState());
Assert.assertTrue(sc4.isClientComponent());
- sc4.persist();
-
Assert.assertEquals(4, s.getServiceComponents().size());
Assert.assertNotNull(s.getServiceComponent(sc3.getName()));
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
index 39a3948..fa5491e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
@@ -62,8 +62,6 @@ import com.google.inject.Injector;
import com.google.inject.Module;
import com.google.inject.persist.PersistService;
import com.google.inject.util.Modules;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Tests AMBARI-9368 and AMBARI-9761 which produced a deadlock during read and
@@ -611,7 +609,6 @@ public class ClusterDeadlockTest {
componentName);
service.addServiceComponent(serviceComponent);
serviceComponent.setDesiredState(State.INSTALLED);
- serviceComponent.persist();
}
return serviceComponent;
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
index 811d98b..ca12826 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
@@ -216,23 +216,19 @@ public class ClusterImplTest {
Service hdfs = cluster.addService("HDFS");
ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
- nameNode.persist();
nameNode.addServiceComponentHost(hostName1).persist();
ServiceComponent dataNode = hdfs.addServiceComponent("DATANODE");
- dataNode.persist();
dataNode.addServiceComponentHost(hostName1).persist();
dataNode.addServiceComponentHost(hostName2).persist();
ServiceComponent hdfsClient = hdfs.addServiceComponent("HDFS_CLIENT");
- hdfsClient.persist();
hdfsClient.addServiceComponentHost(hostName1).persist();
hdfsClient.addServiceComponentHost(hostName2).persist();
Service tez = cluster.addService(serviceToDelete);
ServiceComponent tezClient = tez.addServiceComponent("TEZ_CLIENT");
- tezClient.persist();
ServiceComponentHost tezClientHost1 = tezClient.addServiceComponentHost(hostName1);
tezClientHost1.persist();
ServiceComponentHost tezClientHost2 = tezClient.addServiceComponentHost(hostName2);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index fed9b98..64d8184 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -391,25 +391,18 @@ public class ClusterTest {
s1.addServiceComponent(sc1CompA);
s1.addServiceComponent(sc1CompB);
s1.addServiceComponent(sc1CompC);
- sc1CompA.persist();
- sc1CompB.persist();
- sc1CompC.persist();
// Add ZK
ServiceComponent sc2CompA = serviceComponentFactory.createNew(s2, "ZOOKEEPER_SERVER");
ServiceComponent sc2CompB = serviceComponentFactory.createNew(s2, "ZOOKEEPER_CLIENT");
s2.addServiceComponent(sc2CompA);
s2.addServiceComponent(sc2CompB);
- sc2CompA.persist();
- sc2CompB.persist();
// Add Ganglia
ServiceComponent sc3CompA = serviceComponentFactory.createNew(s3, "GANGLIA_SERVER");
ServiceComponent sc3CompB = serviceComponentFactory.createNew(s3, "GANGLIA_MONITOR");
s3.addServiceComponent(sc3CompA);
s3.addServiceComponent(sc3CompB);
- sc3CompA.persist();
- sc3CompB.persist();
// Host 1 will have all components
ServiceComponentHost schHost1Serv1CompA = serviceComponentHostFactory.createNew(sc1CompA, "h-1");
@@ -692,7 +685,7 @@ public class ClusterTest {
c1.addService(s);
ServiceComponent sc = serviceComponentFactory.createNew(s, "NAMENODE");
s.addServiceComponent(sc);
- sc.persist();
+
ServiceComponentHost sch =
serviceComponentHostFactory.createNew(sc, "h1");
sc.addServiceComponentHost(sch);
@@ -711,7 +704,6 @@ public class ClusterTest {
c1.addService(s1);
ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "PIG");
s1.addServiceComponent(sc1);
- sc1.persist();
ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, "h1");
sc1.addServiceComponentHost(sch1);
sch1.persist();
@@ -733,14 +725,12 @@ public class ClusterTest {
ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE");
s.addServiceComponent(scNN);
- scNN.persist();
ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
scNN.addServiceComponentHost(schNNH1);
schNNH1.persist();
ServiceComponent scDN = serviceComponentFactory.createNew(s, "DATANODE");
s.addServiceComponent(scDN);
- scDN.persist();
ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
scDN.addServiceComponentHost(scDNH1);
scDNH1.persist();
@@ -766,14 +756,12 @@ public class ClusterTest {
ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE");
s.addServiceComponent(scNN);
- scNN.persist();
ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
scNN.addServiceComponentHost(schNNH1);
schNNH1.persist();
ServiceComponent scDN = serviceComponentFactory.createNew(s, "DATANODE");
s.addServiceComponent(scDN);
- scDN.persist();
ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
scDN.addServiceComponentHost(scDNH1);
scDNH1.persist();
@@ -805,14 +793,12 @@ public class ClusterTest {
ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE");
s.addServiceComponent(scNN);
- scNN.persist();
ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
scNN.addServiceComponentHost(schNNH1);
schNNH1.persist();
ServiceComponent scDN = serviceComponentFactory.createNew(s, "DATANODE");
s.addServiceComponent(scDN);
- scDN.persist();
ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
scDN.addServiceComponentHost(scDNH1);
scDNH1.persist();
@@ -845,14 +831,12 @@ public class ClusterTest {
ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE");
sfHDFS.addServiceComponent(scNN);
- scNN.persist();
ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
scNN.addServiceComponentHost(schNNH1);
schNNH1.persist();
ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE");
sfHDFS.addServiceComponent(scDN);
- scDN.persist();
ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
scDN.addServiceComponentHost(scDNH1);
scDNH1.persist();
@@ -862,7 +846,6 @@ public class ClusterTest {
ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER");
sfMR.addServiceComponent(scJT);
- scJT.persist();
ServiceComponentHost schJTH1 = serviceComponentHostFactory.createNew(scJT, "h1");
scJT.addServiceComponentHost(schJTH1);
schJTH1.persist();
@@ -908,14 +891,12 @@ public class ClusterTest {
ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE");
sfHDFS.addServiceComponent(scNN);
- scNN.persist();
ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
scNN.addServiceComponentHost(schNNH1);
schNNH1.persist();
ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE");
sfHDFS.addServiceComponent(scDN);
- scDN.persist();
ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
scDN.addServiceComponentHost(scDNH1);
scDNH1.persist();
@@ -925,7 +906,6 @@ public class ClusterTest {
ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER");
sfMR.addServiceComponent(scJT);
- scJT.persist();
ServiceComponentHost schJTH1 = serviceComponentHostFactory.createNew(scJT, "h1");
scJT.addServiceComponentHost(schJTH1);
schJTH1.persist();
@@ -972,14 +952,12 @@ public class ClusterTest {
ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE");
sfHDFS.addServiceComponent(scNN);
- scNN.persist();
ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
scNN.addServiceComponentHost(schNNH1);
schNNH1.persist();
ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE");
sfHDFS.addServiceComponent(scDN);
- scDN.persist();
ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
scDN.addServiceComponentHost(scDNH1);
scDNH1.persist();
@@ -989,7 +967,6 @@ public class ClusterTest {
ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER");
sfMR.addServiceComponent(scJT);
- scJT.persist();
ServiceComponentHost schJTH1 = serviceComponentHostFactory.createNew(scJT, "h1");
scJT.addServiceComponentHost(schJTH1);
schJTH1.persist();
@@ -1172,8 +1149,6 @@ public class ClusterTest {
Service hdfs = c1.addService("HDFS");
ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
- nameNode.persist();
-
assertEquals(2, c1.getServices().size());
assertEquals(2, injector.getProvider(EntityManager.class).get().
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
index 1012534..3e526d9 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
@@ -400,7 +400,6 @@ public class ClustersDeadlockTest {
componentName);
service.addServiceComponent(serviceComponent);
serviceComponent.setDesiredState(State.INSTALLED);
- serviceComponent.persist();
}
return serviceComponent;
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
index 94bd6d4..9e285c6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
@@ -444,12 +444,9 @@ public class ClustersTest {
Assert.assertNotNull(injector.getInstance(ClusterServiceDAO.class).findByClusterAndServiceNames(c1, "HDFS"));
ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
- nameNode.persist();
ServiceComponent dataNode = hdfs.addServiceComponent("DATANODE");
- dataNode.persist();
ServiceComponent serviceCheckNode = hdfs.addServiceComponent("HDFS_CLIENT");
- serviceCheckNode.persist();
ServiceComponentHost nameNodeHost = nameNode.addServiceComponentHost(h1);
nameNodeHost.persist();
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
index 9106ec3..fbed6e2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
@@ -239,7 +239,6 @@ public class ConcurrentServiceConfigVersionTest {
componentName);
service.addServiceComponent(serviceComponent);
serviceComponent.setDesiredState(State.INSTALLED);
- serviceComponent.persist();
}
return serviceComponent;
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
index 4c87613..bb55597 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
@@ -268,7 +268,6 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
componentName);
service.addServiceComponent(serviceComponent);
serviceComponent.setDesiredState(State.INSTALLED);
- serviceComponent.persist();
}
return serviceComponent;
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
index 080886e..48279af 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
@@ -190,7 +190,6 @@ public class ServiceComponentHostTest {
} catch (ServiceComponentNotFoundException e) {
sc = serviceComponentFactory.createNew(s, svcComponent);
s.addServiceComponent(sc);
- sc.persist();
}
ServiceComponentHost impl = serviceComponentHostFactory.createNew(
[29/32] ambari git commit: AMBARI-18614 - Remove Unnecessary Locks
Inside Of SCH Business Object Implementations (jonathanhurley)
Posted by jo...@apache.org.
AMBARI-18614 - Remove Unnecessary Locks Inside Of SCH Business Object Implementations (jonathanhurley)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2c600829
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2c600829
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2c600829
Branch: refs/heads/trunk
Commit: 2c6008293a664ab3b0f24a3f22be54fe0e5f1faf
Parents: aa29f56
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Oct 18 08:21:08 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Oct 18 08:21:08 2016 -0400
----------------------------------------------------------------------
.../apache/ambari/server/state/HostConfig.java | 12 +-
.../server/state/ServiceComponentHost.java | 6 -
.../server/state/cluster/ClusterImpl.java | 9 +-
.../svccomphost/ServiceComponentHostImpl.java | 968 +++++++------------
.../server/agent/HeartbeatProcessorTest.java | 54 +-
.../server/agent/TestHeartbeatHandler.java | 46 +-
.../server/agent/TestHeartbeatMonitor.java | 34 +-
.../configuration/RecoveryConfigHelperTest.java | 22 +-
.../AmbariManagementControllerTest.java | 56 +-
.../apache/ambari/server/events/EventsTest.java | 2 -
.../apache/ambari/server/orm/OrmTestHelper.java | 7 -
.../ComponentVersionCheckActionTest.java | 1 -
.../upgrades/UpgradeActionTest.java | 2 -
.../server/state/ServiceComponentTest.java | 29 +-
.../state/cluster/ClusterDeadlockTest.java | 1 -
.../server/state/cluster/ClusterImplTest.java | 14 +-
.../server/state/cluster/ClusterTest.java | 37 -
.../state/cluster/ClustersDeadlockTest.java | 1 -
.../server/state/cluster/ClustersTest.java | 3 -
.../ConcurrentServiceConfigVersionTest.java | 1 -
...omponentHostConcurrentWriteDeadlockTest.java | 1 -
.../svccomphost/ServiceComponentHostTest.java | 2 -
22 files changed, 467 insertions(+), 841 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/main/java/org/apache/ambari/server/state/HostConfig.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/HostConfig.java b/ambari-server/src/main/java/org/apache/ambari/server/state/HostConfig.java
index fc22ba5..3e767e1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/HostConfig.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/HostConfig.java
@@ -17,12 +17,13 @@
*/
package org.apache.ambari.server.state;
-import com.google.common.base.Objects;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
import org.codehaus.jackson.annotate.JsonProperty;
import org.codehaus.jackson.map.annotate.JsonSerialize;
-import java.util.HashMap;
-import java.util.Map;
+import com.google.common.base.Objects;
/**
* Data structure that hangs off of the Host and points to what tags are
@@ -30,7 +31,7 @@ import java.util.Map;
*/
public class HostConfig {
- private final Map<Long, String> configGroupOverrides = new HashMap<Long, String>();
+ private final Map<Long, String> configGroupOverrides = new ConcurrentHashMap<Long, String>();
private String defaultVersionTag;
public HostConfig() {
@@ -67,8 +68,9 @@ public class HostConfig {
sb.append(", overrides = [ ");
int i = 0;
for (Map.Entry<Long, String> entry : configGroupOverrides.entrySet()) {
- if (i++ != 0)
+ if (i++ != 0) {
sb.append(", ");
+ }
sb.append(entry.getKey().toString() + " : " + entry.getValue());
}
sb.append("]");
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
index 586134c..e76ebf7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
@@ -182,12 +182,6 @@ public interface ServiceComponentHost {
*/
ServiceComponentHostResponse convertToResponse(Map<String, DesiredConfig> desiredConfigs);
- boolean isPersisted();
-
- void persist();
-
- void refresh();
-
void debugDump(StringBuilder sb);
boolean canBeRemoved();
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 5566343..84697b8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -606,20 +606,13 @@ public class ClusterImpl implements Cluster {
}
@Override
+ @Transactional
public void addServiceComponentHosts(Collection<ServiceComponentHost> serviceComponentHosts) throws AmbariException {
for (ServiceComponentHost serviceComponentHost : serviceComponentHosts) {
Service service = getService(serviceComponentHost.getServiceName());
ServiceComponent serviceComponent = service.getServiceComponent(serviceComponentHost.getServiceComponentName());
serviceComponent.addServiceComponentHost(serviceComponentHost);
}
- persistServiceComponentHosts(serviceComponentHosts);
- }
-
- @Transactional
- void persistServiceComponentHosts(Collection<ServiceComponentHost> serviceComponentHosts) {
- for (ServiceComponentHost serviceComponentHost : serviceComponentHosts) {
- serviceComponentHost.persist();
- }
}
public void addServiceComponentHost(ServiceComponentHost svcCompHost)
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index a6f5040..485329b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -19,13 +19,13 @@
package org.apache.ambari.server.state.svccomphost;
import java.text.MessageFormat;
-import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -78,11 +78,12 @@ import org.apache.ambari.server.state.fsm.SingleArcTransition;
import org.apache.ambari.server.state.fsm.StateMachine;
import org.apache.ambari.server.state.fsm.StateMachineFactory;
import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
+import org.jboss.netty.util.internal.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.collect.ImmutableList;
import com.google.inject.Inject;
-import com.google.inject.Injector;
import com.google.inject.assistedinject.Assisted;
import com.google.inject.assistedinject.AssistedInject;
import com.google.inject.persist.Transactional;
@@ -93,31 +94,33 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
LoggerFactory.getLogger(ServiceComponentHostImpl.class);
private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
- private final Lock readLock = readWriteLock.readLock();
private final Lock writeLock = readWriteLock.writeLock();
private final ServiceComponent serviceComponent;
+
private final Host host;
- private volatile boolean persisted = false;
+
+ private final HostComponentStateDAO hostComponentStateDAO;
+
+ private final HostComponentDesiredStateDAO hostComponentDesiredStateDAO;
+
+ private final HostDAO hostDAO;
@Inject
- HostComponentStateDAO hostComponentStateDAO;
- @Inject
- HostComponentDesiredStateDAO hostComponentDesiredStateDAO;
- @Inject
- HostDAO hostDAO;
- @Inject
- RepositoryVersionDAO repositoryVersionDAO;
- @Inject
- ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
- @Inject
- Clusters clusters;
+ private RepositoryVersionDAO repositoryVersionDAO;
+
+ private final ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
+
+ private final Clusters clusters;
+
@Inject
- ConfigHelper helper;
+ private ConfigHelper helper;
+
@Inject
- AmbariMetaInfo ambariMetaInfo;
+ private AmbariMetaInfo ambariMetaInfo;
+
@Inject
- RepositoryVersionHelper repositoryVersionHelper;
+ private RepositoryVersionHelper repositoryVersionHelper;
/**
* Used for creating commands to send to the agents when alert definitions are
@@ -129,18 +132,12 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
/**
* Used to publish events relating to service CRUD operations.
*/
- @Inject
- private AmbariEventPublisher eventPublisher;
+ private final AmbariEventPublisher eventPublisher;
/**
* Data access object for stack.
*/
- @Inject
- private StackDAO stackDAO;
-
- // Only used when object state is not persisted
- private HostComponentStateEntity stateEntity;
- private HostComponentDesiredStateEntity desiredStateEntity;
+ private final StackDAO stackDAO;
/**
* The desired component state entity PK.
@@ -150,14 +147,19 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
/**
* Cache the generated id for host component state for fast lookups.
*/
- private Long hostComponentStateId;
+ private final Long hostComponentStateId;
private long lastOpStartTime;
private long lastOpEndTime;
private long lastOpLastUpdateTime;
- private Map<String, HostConfig> actualConfigs = new HashMap<String,
- HostConfig>();
- private List<Map<String, String>> processes = new ArrayList<Map<String, String>>();
+
+ private ConcurrentMap<String, HostConfig> actualConfigs = new ConcurrentHashMap<>();
+ private ImmutableList<Map<String, String>> processes = ImmutableList.of();
+
+ /**
+ * The name of the host (which should never, ever change)
+ */
+ private final String hostName;
private static final StateMachineFactory
<ServiceComponentHostImpl, State,
@@ -698,14 +700,9 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
}
private void resetLastOpInfo() {
- try {
- writeLock.lock();
- setLastOpStartTime(-1);
- setLastOpLastUpdateTime(-1);
- setLastOpEndTime(-1);
- } finally {
- writeLock.unlock();
- }
+ setLastOpStartTime(-1);
+ setLastOpLastUpdateTime(-1);
+ setLastOpEndTime(-1);
}
private void updateLastOpInfo(ServiceComponentHostEventType eventType,
@@ -740,8 +737,21 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
@AssistedInject
public ServiceComponentHostImpl(@Assisted ServiceComponent serviceComponent,
- @Assisted String hostName, Injector injector) {
- injector.injectMembers(this);
+ @Assisted String hostName, Clusters clusters, StackDAO stackDAO, HostDAO hostDAO,
+ ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO,
+ HostComponentStateDAO hostComponentStateDAO,
+ HostComponentDesiredStateDAO hostComponentDesiredStateDAO,
+ AmbariEventPublisher eventPublisher) {
+
+ this.serviceComponent = serviceComponent;
+ this.hostName = hostName;
+ this.clusters = clusters;
+ this.stackDAO = stackDAO;
+ this.hostDAO = hostDAO;
+ this.serviceComponentDesiredStateDAO = serviceComponentDesiredStateDAO;
+ this.hostComponentStateDAO = hostComponentStateDAO;
+ this.hostComponentDesiredStateDAO = hostComponentDesiredStateDAO;
+ this.eventPublisher = eventPublisher;
if (serviceComponent.isClientComponent()) {
stateMachine = clientStateMachineFactory.make(this);
@@ -749,8 +759,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
stateMachine = daemonStateMachineFactory.make(this);
}
- this.serviceComponent = serviceComponent;
-
HostEntity hostEntity = null;
try {
host = clusters.getHost(hostName);
@@ -767,7 +775,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
StackEntity stackEntity = stackDAO.find(stackId.getStackName(),
stackId.getStackVersion());
- stateEntity = new HostComponentStateEntity();
+ HostComponentStateEntity stateEntity = new HostComponentStateEntity();
stateEntity.setClusterId(serviceComponent.getClusterId());
stateEntity.setComponentName(serviceComponent.getName());
stateEntity.setServiceName(serviceComponent.getServiceName());
@@ -777,7 +785,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
stateEntity.setUpgradeState(UpgradeState.NONE);
stateEntity.setCurrentStack(stackEntity);
- desiredStateEntity = new HostComponentDesiredStateEntity();
+ HostComponentDesiredStateEntity desiredStateEntity = new HostComponentDesiredStateEntity();
desiredStateEntity.setClusterId(serviceComponent.getClusterId());
desiredStateEntity.setComponentName(serviceComponent.getName());
desiredStateEntity.setServiceName(serviceComponent.getServiceName());
@@ -793,19 +801,40 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
desiredStateEntityPK = getHostComponentDesiredStateEntityPK(desiredStateEntity);
+ persistEntities(hostEntity, stateEntity, desiredStateEntity);
+
+ // publish the service component installed event
+ ServiceComponentInstalledEvent event = new ServiceComponentInstalledEvent(getClusterId(),
+ stackId.getStackName(), stackId.getStackVersion(), getServiceName(),
+ getServiceComponentName(), getHostName(), isRecoveryEnabled());
+
+ eventPublisher.publish(event);
+
+ hostComponentStateId = stateEntity.getId();
+
resetLastOpInfo();
}
@AssistedInject
public ServiceComponentHostImpl(@Assisted ServiceComponent serviceComponent,
- @Assisted HostComponentStateEntity stateEntity,
- @Assisted HostComponentDesiredStateEntity desiredStateEntity,
- Injector injector) {
- injector.injectMembers(this);
- this.serviceComponent = serviceComponent;
+ @Assisted HostComponentStateEntity stateEntity,
+ @Assisted HostComponentDesiredStateEntity desiredStateEntity, Clusters clusters,
+ StackDAO stackDAO, HostDAO hostDAO,
+ ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO,
+ HostComponentStateDAO hostComponentStateDAO,
+ HostComponentDesiredStateDAO hostComponentDesiredStateDAO,
+ AmbariEventPublisher eventPublisher) {
+
+ hostName = stateEntity.getHostName();
- this.desiredStateEntity = desiredStateEntity;
- this.stateEntity = stateEntity;
+ this.serviceComponent = serviceComponent;
+ this.clusters = clusters;
+ this.stackDAO = stackDAO;
+ this.hostDAO = hostDAO;
+ this.serviceComponentDesiredStateDAO = serviceComponentDesiredStateDAO;
+ this.hostComponentStateDAO = hostComponentStateDAO;
+ this.hostComponentDesiredStateDAO = hostComponentDesiredStateDAO;
+ this.eventPublisher = eventPublisher;
desiredStateEntityPK = getHostComponentDesiredStateEntityPK(desiredStateEntity);
hostComponentStateId = stateEntity.getId();
@@ -825,8 +854,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
LOG.error("Host '{}' was not found " + stateEntity.getHostName());
throw new RuntimeException(e);
}
-
- persisted = true;
}
@Override
@@ -838,119 +865,81 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
@Override
public void setState(State state) {
- writeLock.lock();
- try {
- stateMachine.setCurrentState(state);
- HostComponentStateEntity stateEntity = getStateEntity();
- if (stateEntity != null) {
- getStateEntity().setCurrentState(state);
- saveComponentStateEntityIfPersisted();
- } else {
- LOG.warn("Setting a member on an entity object that may have been " +
- "previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() + ", " +
- "hostName = " + getHostName());
- }
-
- } finally {
- writeLock.unlock();
+ stateMachine.setCurrentState(state);
+ HostComponentStateEntity stateEntity = getStateEntity();
+ if (stateEntity != null) {
+ stateEntity.setCurrentState(state);
+ stateEntity = hostComponentStateDAO.merge(stateEntity);
+ } else {
+ LOG.warn("Setting a member on an entity object that may have been "
+ + "previously deleted, serviceName = " + getServiceName() + ", " + "componentName = "
+ + getServiceComponentName() + ", " + "hostName = " + getHostName());
}
}
@Override
public String getVersion() {
- readLock.lock();
- try {
- HostComponentStateEntity stateEntity = getStateEntity();
- if (stateEntity != null) {
- return stateEntity.getVersion();
- } else {
- LOG.warn("Trying to fetch a member from an entity object that may " +
- "have been previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() + ", " +
- "hostName = " + getHostName());
- }
- } finally {
- readLock.unlock();
+ HostComponentStateEntity stateEntity = getStateEntity();
+ if (stateEntity != null) {
+ return stateEntity.getVersion();
+ } else {
+ LOG.warn("Trying to fetch a member from an entity object that may "
+ + "have been previously deleted, serviceName = " + getServiceName() + ", "
+ + "componentName = " + getServiceComponentName() + ", " + "hostName = " + getHostName());
}
+
return null;
}
@Override
public void setVersion(String version) {
- writeLock.lock();
- try {
- HostComponentStateEntity stateEntity = getStateEntity();
- if (stateEntity != null) {
- getStateEntity().setVersion(version);
- saveComponentStateEntityIfPersisted();
- } else {
- LOG.warn("Setting a member on an entity object that may have been " +
- "previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() + ", " +
- "hostName = " + getHostName());
- }
- } finally {
- writeLock.unlock();
+ HostComponentStateEntity stateEntity = getStateEntity();
+ if (stateEntity != null) {
+ stateEntity.setVersion(version);
+ stateEntity = hostComponentStateDAO.merge(stateEntity);
+ } else {
+ LOG.warn("Setting a member on an entity object that may have been "
+ + "previously deleted, serviceName = " + getServiceName() + ", " + "componentName = "
+ + getServiceComponentName() + ", " + "hostName = " + getHostName());
}
}
@Override
public SecurityState getSecurityState() {
- readLock.lock();
- try {
- HostComponentStateEntity stateEntity = getStateEntity();
- if (stateEntity != null) {
- return getStateEntity().getSecurityState();
- } else {
- LOG.warn("Trying to fetch a member from an entity object that may " +
- "have been previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() + ", " +
- "hostName = " + getHostName());
- }
-
- } finally {
- readLock.unlock();
+ HostComponentStateEntity stateEntity = getStateEntity();
+ if (stateEntity != null) {
+ return stateEntity.getSecurityState();
+ } else {
+ LOG.warn("Trying to fetch a member from an entity object that may "
+ + "have been previously deleted, serviceName = " + getServiceName() + ", "
+ + "componentName = " + getServiceComponentName() + ", " + "hostName = " + getHostName());
}
+
return null;
}
@Override
public void setSecurityState(SecurityState securityState) {
- writeLock.lock();
- try {
- HostComponentStateEntity stateEntity = getStateEntity();
- if (stateEntity != null) {
- getStateEntity().setSecurityState(securityState);
- saveComponentStateEntityIfPersisted();
- } else {
- LOG.warn("Setting a member on an entity object that may have been " +
- "previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() + ", " +
- "hostName = " + getHostName());
- }
-
- } finally {
- writeLock.unlock();
+ HostComponentStateEntity stateEntity = getStateEntity();
+ if (stateEntity != null) {
+ stateEntity.setSecurityState(securityState);
+ stateEntity = hostComponentStateDAO.merge(stateEntity);
+ } else {
+ LOG.warn("Setting a member on an entity object that may have been "
+ + "previously deleted, serviceName = " + getServiceName() + ", " + "componentName = "
+ + getServiceComponentName() + ", " + "hostName = " + getHostName());
}
}
@Override
public SecurityState getDesiredSecurityState() {
- readLock.lock();
- try {
- HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
- if (desiredStateEntity != null) {
- return getDesiredStateEntity().getSecurityState();
- } else {
- LOG.warn("Trying to fetch a member from an entity object that may " +
- "have been previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() + ", " +
- "hostName = " + getHostName());
- }
-
- } finally {
- readLock.unlock();
+ HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+ if (desiredStateEntity != null) {
+ return desiredStateEntity.getSecurityState();
+ } else {
+ LOG.warn("Trying to fetch a member from an entity object that may "
+ + "have been previously deleted, serviceName = " + getServiceName() + ", "
+ + "componentName = " + getServiceComponentName() + ", " + "hostName = " + getHostName());
}
return null;
}
@@ -961,16 +950,12 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
throw new AmbariException("The security state must be an endpoint state");
}
- writeLock.lock();
- try {
- LOG.debug("Set DesiredSecurityState on serviceName = {} componentName = {} hostName = {} to {}",
+ LOG.debug("Set DesiredSecurityState on serviceName = {} componentName = {} hostName = {} to {}",
getServiceName(), getServiceComponentName(), getHostName(), securityState);
- getDesiredStateEntity().setSecurityState(securityState);
- saveComponentDesiredStateEntityIfPersisted();
- } finally {
- writeLock.unlock();
- }
+ HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+ desiredStateEntity.setSecurityState(securityState);
+ hostComponentDesiredStateDAO.merge(desiredStateEntity);
}
/**
@@ -984,41 +969,26 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
*/
@Override
public void setUpgradeState(UpgradeState upgradeState) {
- writeLock.lock();
- try {
- HostComponentStateEntity stateEntity = getStateEntity();
- if (stateEntity != null) {
- stateEntity.setUpgradeState(upgradeState);
- saveComponentStateEntityIfPersisted();
- } else {
- LOG.warn("Setting a member on an entity object that may have been " +
- "previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() + ", " +
- "hostName = " + getHostName());
- }
-
- } finally {
- writeLock.unlock();
+ HostComponentStateEntity stateEntity = getStateEntity();
+ if (stateEntity != null) {
+ stateEntity.setUpgradeState(upgradeState);
+ stateEntity = hostComponentStateDAO.merge(stateEntity);
+ } else {
+ LOG.warn("Setting a member on an entity object that may have been "
+ + "previously deleted, serviceName = " + getServiceName() + ", " + "componentName = "
+ + getServiceComponentName() + ", " + "hostName = " + getHostName());
}
}
@Override
public UpgradeState getUpgradeState() {
- readLock.lock();
-
- try {
- HostComponentStateEntity stateEntity = getStateEntity();
- if (stateEntity != null) {
- return stateEntity.getUpgradeState();
- } else {
- LOG.warn("Trying to fetch a state entity from an object that may " +
- "have been previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() + ", " +
- "hostName = " + getHostName());
- }
-
- } finally {
- readLock.unlock();
+ HostComponentStateEntity stateEntity = getStateEntity();
+ if (stateEntity != null) {
+ return stateEntity.getUpgradeState();
+ } else {
+ LOG.warn("Trying to fetch a state entity from an object that may "
+ + "have been previously deleted, serviceName = " + getServiceName() + ", "
+ + "componentName = " + getServiceComponentName() + ", " + "hostName = " + getHostName());
}
return UpgradeState.NONE;
@@ -1038,8 +1008,9 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
writeLock.lock();
try {
stateMachine.doTransition(event.getType(), event);
- getStateEntity().setCurrentState(stateMachine.getCurrentState());
- saveComponentStateEntityIfPersisted();
+ HostComponentStateEntity stateEntity = getStateEntity();
+ stateEntity.setCurrentState(stateMachine.getCurrentState());
+ stateEntity = hostComponentStateDAO.merge(stateEntity);
// TODO Audit logs
} catch (InvalidStateTransitionException e) {
LOG.error("Can't handle ServiceComponentHostEvent event at"
@@ -1092,72 +1063,42 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
* @return the lastOpStartTime
*/
public long getLastOpStartTime() {
- readLock.lock();
- try {
- return lastOpStartTime;
- } finally {
- readLock.unlock();
- }
+ return lastOpStartTime;
}
/**
* @param lastOpStartTime the lastOpStartTime to set
*/
public void setLastOpStartTime(long lastOpStartTime) {
- writeLock.lock();
- try {
- this.lastOpStartTime = lastOpStartTime;
- } finally {
- writeLock.unlock();
- }
+ this.lastOpStartTime = lastOpStartTime;
}
/**
* @return the lastOpEndTime
*/
public long getLastOpEndTime() {
- readLock.lock();
- try {
- return lastOpEndTime;
- } finally {
- readLock.unlock();
- }
+ return lastOpEndTime;
}
/**
* @param lastOpEndTime the lastOpEndTime to set
*/
public void setLastOpEndTime(long lastOpEndTime) {
- writeLock.lock();
- try {
- this.lastOpEndTime = lastOpEndTime;
- } finally {
- writeLock.unlock();
- }
+ this.lastOpEndTime = lastOpEndTime;
}
/**
* @return the lastOpLastUpdateTime
*/
public long getLastOpLastUpdateTime() {
- readLock.lock();
- try {
- return lastOpLastUpdateTime;
- } finally {
- readLock.unlock();
- }
+ return lastOpLastUpdateTime;
}
/**
* @param lastOpLastUpdateTime the lastOpLastUpdateTime to set
*/
public void setLastOpLastUpdateTime(long lastOpLastUpdateTime) {
- writeLock.lock();
- try {
- this.lastOpLastUpdateTime = lastOpLastUpdateTime;
- } finally {
- writeLock.unlock();
- }
+ this.lastOpLastUpdateTime = lastOpLastUpdateTime;
}
@Override
@@ -1177,220 +1118,163 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
@Override
public StackId getStackVersion() {
- readLock.lock();
- try {
- HostComponentStateEntity schStateEntity = stateEntity;
- if (schStateEntity == null) {
- return new StackId();
- }
-
- StackEntity currentStackEntity = schStateEntity.getCurrentStack();
- return new StackId(currentStackEntity.getStackName(),
- currentStackEntity.getStackVersion());
- } finally {
- readLock.unlock();
+ HostComponentStateEntity schStateEntity = getStateEntity();
+ if (schStateEntity == null) {
+ return new StackId();
}
+
+ StackEntity currentStackEntity = schStateEntity.getCurrentStack();
+ return new StackId(currentStackEntity.getStackName(), currentStackEntity.getStackVersion());
}
@Override
public void setStackVersion(StackId stackId) {
StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
- writeLock.lock();
- try {
- HostComponentStateEntity stateEntity = getStateEntity();
- if (stateEntity != null) {
- stateEntity.setCurrentStack(stackEntity);
- saveComponentStateEntityIfPersisted();
- } else {
- LOG.warn("Setting a member on an entity object that may have been " +
- "previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() + ", " +
- "hostName = " + getHostName());
- }
-
- } finally {
- writeLock.unlock();
+ HostComponentStateEntity stateEntity = getStateEntity();
+ if (stateEntity != null) {
+ stateEntity.setCurrentStack(stackEntity);
+ stateEntity = hostComponentStateDAO.merge(stateEntity);
+ } else {
+ LOG.warn("Setting a member on an entity object that may have been "
+ + "previously deleted, serviceName = " + getServiceName() + ", " + "componentName = "
+ + getServiceComponentName() + ", " + "hostName = " + getHostName());
}
}
@Override
public State getDesiredState() {
- readLock.lock();
- try {
- HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
- if (desiredStateEntity != null) {
- return desiredStateEntity.getDesiredState();
- } else {
- LOG.warn("Trying to fetch a member from an entity object that may " +
- "have been previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() + ", " +
- "hostName = " + getHostName());
- }
-
- } finally {
- readLock.unlock();
+ HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+ if (desiredStateEntity != null) {
+ return desiredStateEntity.getDesiredState();
+ } else {
+ LOG.warn("Trying to fetch a member from an entity object that may "
+ + "have been previously deleted, serviceName = " + getServiceName() + ", "
+ + "componentName = " + getServiceComponentName() + ", " + "hostName = " + getHostName());
}
+
return null;
}
@Override
public void setDesiredState(State state) {
- writeLock.lock();
- try {
- LOG.debug("Set DesiredState on serviceName = {} componentName = {} hostName = {} to {} ",
+ LOG.debug("Set DesiredState on serviceName = {} componentName = {} hostName = {} to {} ",
getServiceName(), getServiceComponentName(), getHostName(), state);
- HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
- if (desiredStateEntity != null) {
- desiredStateEntity.setDesiredState(state);
- saveComponentDesiredStateEntityIfPersisted();
- } else {
- LOG.warn("Setting a member on an entity object that may have been " +
- "previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() +
- "hostName = " + getHostName());
- }
- } finally {
- writeLock.unlock();
+ HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+ if (desiredStateEntity != null) {
+ desiredStateEntity.setDesiredState(state);
+ hostComponentDesiredStateDAO.merge(desiredStateEntity);
+ } else {
+ LOG.warn("Setting a member on an entity object that may have been "
+ + "previously deleted, serviceName = " + getServiceName() + ", " + "componentName = "
+ + getServiceComponentName() + "hostName = " + getHostName());
}
}
@Override
public StackId getDesiredStackVersion() {
- readLock.lock();
- try {
- HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
- if (desiredStateEntity != null) {
- StackEntity desiredStackEntity = desiredStateEntity.getDesiredStack();
- return new StackId(desiredStackEntity.getStackName(),
- desiredStackEntity.getStackVersion());
- } else {
- LOG.warn("Trying to fetch a member from an entity object that may " +
- "have been previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() + ", " +
- "hostName = " + getHostName());
- }
-
- } finally {
- readLock.unlock();
+ HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+ if (desiredStateEntity != null) {
+ StackEntity desiredStackEntity = desiredStateEntity.getDesiredStack();
+ return new StackId(desiredStackEntity.getStackName(), desiredStackEntity.getStackVersion());
+ } else {
+ LOG.warn("Trying to fetch a member from an entity object that may "
+ + "have been previously deleted, serviceName = " + getServiceName() + ", "
+ + "componentName = " + getServiceComponentName() + ", " + "hostName = " + getHostName());
}
return null;
}
@Override
public void setDesiredStackVersion(StackId stackId) {
- writeLock.lock();
- try {
- LOG.debug("Set DesiredStackVersion on serviceName = {} componentName = {} hostName = {} to {}",
+ LOG.debug("Set DesiredStackVersion on serviceName = {} componentName = {} hostName = {} to {}",
getServiceName(), getServiceComponentName(), getHostName(), stackId);
- HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
- if (desiredStateEntity != null) {
- StackEntity stackEntity = stackDAO.find(stackId.getStackName(),
- stackId.getStackVersion());
+ HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+ if (desiredStateEntity != null) {
+ StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
- desiredStateEntity.setDesiredStack(stackEntity);
- saveComponentDesiredStateEntityIfPersisted();
- }
- } finally {
- writeLock.unlock();
+ desiredStateEntity.setDesiredStack(stackEntity);
+ hostComponentDesiredStateDAO.merge(desiredStateEntity);
}
}
@Override
public HostComponentAdminState getComponentAdminState() {
- readLock.lock();
- try {
- HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
- if (desiredStateEntity != null) {
- HostComponentAdminState adminState = desiredStateEntity.getAdminState();
- if (adminState == null && !serviceComponent.isClientComponent()
+ HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+ if (desiredStateEntity != null) {
+ HostComponentAdminState adminState = desiredStateEntity.getAdminState();
+ if (adminState == null && !serviceComponent.isClientComponent()
&& !serviceComponent.isMasterComponent()) {
- adminState = HostComponentAdminState.INSERVICE;
- }
- return adminState;
+ adminState = HostComponentAdminState.INSERVICE;
}
-
- } finally {
- readLock.unlock();
+ return adminState;
}
+
return null;
}
@Override
public void setComponentAdminState(HostComponentAdminState attribute) {
- writeLock.lock();
- try {
- LOG.debug("Set ComponentAdminState on serviceName = {} componentName = {} hostName = {} to {}",
+ LOG.debug("Set ComponentAdminState on serviceName = {} componentName = {} hostName = {} to {}",
getServiceName(), getServiceComponentName(), getHostName(), attribute);
- HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
- if (desiredStateEntity != null) {
- desiredStateEntity.setAdminState(attribute);
- saveComponentDesiredStateEntityIfPersisted();
- } else {
- LOG.warn("Setting a member on an entity object that may have been " +
- "previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() +
- "hostName = " + getHostName());
- }
- } finally {
- writeLock.unlock();
+ HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+ if (desiredStateEntity != null) {
+ desiredStateEntity.setAdminState(attribute);
+ hostComponentDesiredStateDAO.merge(desiredStateEntity);
+ } else {
+ LOG.warn("Setting a member on an entity object that may have been "
+ + "previously deleted, serviceName = " + getServiceName() + ", " + "componentName = "
+ + getServiceComponentName() + "hostName = " + getHostName());
}
}
@Override
public ServiceComponentHostResponse convertToResponse(Map<String, DesiredConfig> desiredConfigs) {
- readLock.lock();
- try {
- HostComponentStateEntity hostComponentStateEntity = getStateEntity();
- if (null == hostComponentStateEntity) {
- LOG.warn(
- "Could not convert ServiceComponentHostResponse to a response. It's possible that Host {} was deleted.",
- getHostName());
- return null;
- }
+ HostComponentStateEntity hostComponentStateEntity = getStateEntity();
+ if (null == hostComponentStateEntity) {
+ LOG.warn(
+ "Could not convert ServiceComponentHostResponse to a response. It's possible that Host {} was deleted.",
+ getHostName());
+ return null;
+ }
- String clusterName = serviceComponent.getClusterName();
- String serviceName = serviceComponent.getServiceName();
- String serviceComponentName = serviceComponent.getName();
- String hostName = getHostName();
- String state = getState().toString();
- String stackId = getStackVersion().getStackId();
- String desiredState = getDesiredState().toString();
- String desiredStackId = getDesiredStackVersion().getStackId();
- HostComponentAdminState componentAdminState = getComponentAdminState();
- UpgradeState upgradeState = hostComponentStateEntity.getUpgradeState();
-
- String displayName = null;
- try {
- ComponentInfo compInfo = ambariMetaInfo.getComponent(getStackVersion().getStackName(),
- getStackVersion().getStackVersion(), serviceName, serviceComponentName);
- displayName = compInfo.getDisplayName();
- } catch (AmbariException e) {
- displayName = serviceComponentName;
- }
+ String clusterName = serviceComponent.getClusterName();
+ String serviceName = serviceComponent.getServiceName();
+ String serviceComponentName = serviceComponent.getName();
+ String hostName = getHostName();
+ String state = getState().toString();
+ String stackId = getStackVersion().getStackId();
+ String desiredState = getDesiredState().toString();
+ String desiredStackId = getDesiredStackVersion().getStackId();
+ HostComponentAdminState componentAdminState = getComponentAdminState();
+ UpgradeState upgradeState = hostComponentStateEntity.getUpgradeState();
- ServiceComponentHostResponse r = new ServiceComponentHostResponse(
- clusterName, serviceName,
- serviceComponentName, displayName, hostName, state,
- stackId, desiredState,
- desiredStackId, componentAdminState);
+ String displayName = null;
+ try {
+ ComponentInfo compInfo = ambariMetaInfo.getComponent(getStackVersion().getStackName(),
+ getStackVersion().getStackVersion(), serviceName, serviceComponentName);
+ displayName = compInfo.getDisplayName();
+ } catch (AmbariException e) {
+ displayName = serviceComponentName;
+ }
- r.setActualConfigs(actualConfigs);
- r.setUpgradeState(upgradeState);
+ ServiceComponentHostResponse r = new ServiceComponentHostResponse(clusterName, serviceName,
+ serviceComponentName, displayName, hostName, state, stackId, desiredState, desiredStackId,
+ componentAdminState);
- try {
- r.setStaleConfig(helper.isStaleConfigs(this, desiredConfigs));
- } catch (Exception e) {
- LOG.error("Could not determine stale config", e);
- }
+ r.setActualConfigs(actualConfigs);
+ r.setUpgradeState(upgradeState);
- return r;
- } finally {
- readLock.unlock();
+ try {
+ r.setStaleConfig(helper.isStaleConfigs(this, desiredConfigs));
+ } catch (Exception e) {
+ LOG.error("Could not determine stale config", e);
}
+
+ return r;
}
@Override
@@ -1400,79 +1284,35 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
@Override
public void debugDump(StringBuilder sb) {
- readLock.lock();
- try {
- sb.append("ServiceComponentHost={ hostname=").append(getHostName()).append(
- ", serviceComponentName=").append(serviceComponent.getName()).append(
- ", clusterName=").append(serviceComponent.getClusterName()).append(
- ", serviceName=").append(serviceComponent.getServiceName()).append(
- ", desiredStackVersion=").append(getDesiredStackVersion()).append(
- ", desiredState=").append(getDesiredState()).append(", stackVersion=").append(
- getStackVersion()).append(", state=").append(getState()).append(
- ", securityState=").append(getSecurityState()).append(
- ", desiredSecurityState=").append(getDesiredSecurityState()).append(
- " }");
- } finally {
- readLock.unlock();
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean isPersisted() {
- // a lock around this internal state variable is not required since we
- // have appropriate locks in the persist() method and this member is
- // only ever false under the condition that the object is new
- return persisted;
- }
-
- /**
- * {@inheritDoc}
- * <p/>
- * This method uses Java locks and then delegates to internal methods which
- * perform the JPA merges inside of a transaction. Because of this, a
- * transaction is not necessary before this calling this method.
- */
- @Override
- public void persist() {
- writeLock.lock();
- try {
- if (!persisted) {
- // persist the new cluster topology
- persistEntities();
- persisted = true;
-
- refresh();
-
- // publish the service component installed event
- StackId stackId = getDesiredStackVersion();
-
- ServiceComponentInstalledEvent event = new ServiceComponentInstalledEvent(getClusterId(),
- stackId.getStackName(), stackId.getStackVersion(), getServiceName(),
- getServiceComponentName(), getHostName(), isRecoveryEnabled());
-
- eventPublisher.publish(event);
- } else {
- saveComponentStateEntityIfPersisted();
- saveComponentDesiredStateEntityIfPersisted();
- }
- } finally {
- writeLock.unlock();
- }
+ sb.append("ServiceComponentHost={ hostname=").append(getHostName())
+ .append(", serviceComponentName=")
+ .append(serviceComponent.getName())
+ .append(", clusterName=")
+ .append(serviceComponent.getClusterName())
+ .append(", serviceName=")
+ .append(serviceComponent.getServiceName())
+ .append(", desiredStackVersion=")
+ .append(getDesiredStackVersion())
+ .append(", desiredState=")
+ .append(getDesiredState())
+ .append(", stackVersion=")
+ .append(getStackVersion())
+ .append(", state=")
+ .append(getState())
+ .append(", securityState=")
+ .append(getSecurityState())
+ .append(", desiredSecurityState=")
+ .append(getDesiredSecurityState())
+ .append(" }");
}
@Transactional
- protected void persistEntities() {
+ private void persistEntities(HostEntity hostEntity, HostComponentStateEntity stateEntity,
+ HostComponentDesiredStateEntity desiredStateEntity) {
ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
serviceComponent.getClusterId(), serviceComponent.getServiceName(),
serviceComponent.getName());
- HostEntity hostEntity = hostDAO.findByName(getHostName());
- hostEntity.addHostComponentStateEntity(stateEntity);
- hostEntity.addHostComponentDesiredStateEntity(desiredStateEntity);
-
desiredStateEntity.setServiceComponentDesiredStateEntity(serviceComponentDesiredStateEntity);
desiredStateEntity.setHostEntity(hostEntity);
@@ -1485,68 +1325,17 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
serviceComponentDesiredStateEntity.getHostComponentDesiredStateEntities().add(
desiredStateEntity);
- HostComponentStateEntity stateEntity = hostComponentStateDAO.findByIndex(serviceComponent.getClusterId(),
- serviceComponent.getServiceName(), serviceComponent.getName(), hostEntity.getHostId());
-
- hostComponentStateId = stateEntity.getId();
-
- serviceComponentDesiredStateDAO.merge(serviceComponentDesiredStateEntity);
- hostDAO.merge(hostEntity);
- }
-
- @Override
- public void refresh() {
- writeLock.lock();
- try {
- getDesiredStateEntity();
- getStateEntity();
- } finally {
- writeLock.unlock();
- }
- }
-
- /**
- * Merges the encapsulated {@link HostComponentStateEntity} inside of a new transaction. This
- * method assumes that the appropriate write lock has already been acquired
- * from {@link #readWriteLock}.
- */
- @Transactional
- void saveComponentStateEntityIfPersisted() {
- if (isPersisted()) {
- hostComponentStateDAO.merge(stateEntity);
- }
- }
-
- /**
- * Merges the encapsulated {@link HostComponentDesiredStateEntity} inside of a new transaction. This
- * method assumes that the appropriate write lock has already been acquired
- * from {@link #readWriteLock}.
- */
- @Transactional
- void saveComponentDesiredStateEntityIfPersisted() {
- if (isPersisted()) {
- LOG.debug("Save desiredStateEntity serviceName = {} componentName = {} hostName = {} desiredState = {}",
- getServiceName(), getServiceComponentName(), getHostName(), desiredStateEntity.getDesiredState());
+ serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.merge(
+ serviceComponentDesiredStateEntity);
- hostComponentDesiredStateDAO.merge(desiredStateEntity);
- }
+ hostEntity.addHostComponentStateEntity(stateEntity);
+ hostEntity.addHostComponentDesiredStateEntity(desiredStateEntity);
+ hostEntity = hostDAO.merge(hostEntity);
}
-
@Override
public boolean canBeRemoved() {
- boolean schLockAcquired = false;
- try {
- // if unable to read, then writers are writing; cannot remove SCH
- schLockAcquired = readLock.tryLock();
-
- return schLockAcquired && (getState().isRemovableState());
-
- } finally {
- if (schLockAcquired) {
- readLock.unlock();
- }
- }
+ return getState().isRemovableState();
}
@Override
@@ -1555,12 +1344,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
writeLock.lock();
try {
- if (persisted) {
- removeEntities();
- persisted = false;
- fireRemovalEvent = true;
- }
-
+ removeEntities();
+ fireRemovalEvent = true;
clusters.getCluster(getClusterName()).removeServiceComponentHost(this);
} catch (AmbariException ex) {
LOG.error("Unable to remove a service component from a host", ex);
@@ -1618,56 +1403,43 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
return;
}
- writeLock.lock();
- try {
- LOG.debug("Updating actual config tags: " + configTags);
- actualConfigs = new HashMap<String, HostConfig>();
-
- for (Entry<String, Map<String, String>> entry : configTags.entrySet()) {
- String type = entry.getKey();
- Map<String, String> values = new HashMap<String, String>(
- entry.getValue());
-
- String tag = values.get(ConfigHelper.CLUSTER_DEFAULT_TAG);
- values.remove(ConfigHelper.CLUSTER_DEFAULT_TAG);
-
- HostConfig hc = new HostConfig();
- hc.setDefaultVersionTag(tag);
- actualConfigs.put(type, hc);
-
- if (!values.isEmpty()) {
- for (Entry<String, String> overrideEntry : values.entrySet()) {
- Long groupId = Long.parseLong(overrideEntry.getKey());
- hc.getConfigGroupOverrides().put(groupId, overrideEntry.getValue());
- if (!configGroupMap.containsKey(groupId)) {
- LOG.debug("Config group does not exist, id = " + groupId);
- }
+ LOG.debug("Updating configuration tags for {}: {}", hostName, configTags);
+ final ConcurrentMap<String, HostConfig> newActualConfigs = new ConcurrentHashMap<>();
+
+ for (Entry<String, Map<String, String>> entry : configTags.entrySet()) {
+ String type = entry.getKey();
+ Map<String, String> values = new HashMap<String, String>(entry.getValue());
+
+ String tag = values.get(ConfigHelper.CLUSTER_DEFAULT_TAG);
+ values.remove(ConfigHelper.CLUSTER_DEFAULT_TAG);
+
+ HostConfig hc = new HostConfig();
+ hc.setDefaultVersionTag(tag);
+ newActualConfigs.put(type, hc);
+
+ if (!values.isEmpty()) {
+ for (Entry<String, String> overrideEntry : values.entrySet()) {
+ Long groupId = Long.parseLong(overrideEntry.getKey());
+ hc.getConfigGroupOverrides().put(groupId, overrideEntry.getValue());
+ if (!configGroupMap.containsKey(groupId)) {
+ LOG.debug("Config group does not exist, id = " + groupId);
}
}
}
- } finally {
- writeLock.unlock();
}
+
+ // update internal stateful collection in an "atomic" manner
+ actualConfigs = newActualConfigs;
}
@Override
public Map<String, HostConfig> getActualConfigs() {
- readLock.lock();
- try {
- return actualConfigs;
- } finally {
- readLock.unlock();
- }
+ return actualConfigs;
}
@Override
public HostState getHostState() {
- readLock.lock();
- try {
- return host.getState();
- } finally {
- readLock.unlock();
- }
+ return host.getState();
}
@Override
@@ -1677,90 +1449,57 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
@Override
public void setMaintenanceState(MaintenanceState state) {
- writeLock.lock();
- try {
- LOG.debug("Set MaintenanceState on serviceName = {} componentName = {} hostName = {} to {}",
+ LOG.debug("Set MaintenanceState on serviceName = {} componentName = {} hostName = {} to {}",
getServiceName(), getServiceComponentName(), getHostName(), state);
- HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
- if (desiredStateEntity != null) {
- desiredStateEntity.setMaintenanceState(state);
- saveComponentDesiredStateEntityIfPersisted();
-
- // broadcast the maintenance mode change
- MaintenanceModeEvent event = new MaintenanceModeEvent(state, this);
- eventPublisher.publish(event);
- } else {
- LOG.warn("Setting a member on an entity object that may have been " +
- "previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() +
- ", hostName = " + getHostName());
- }
- } finally {
- writeLock.unlock();
+ HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+ if (desiredStateEntity != null) {
+ desiredStateEntity.setMaintenanceState(state);
+ hostComponentDesiredStateDAO.merge(desiredStateEntity);
+
+ // broadcast the maintenance mode change
+ MaintenanceModeEvent event = new MaintenanceModeEvent(state, this);
+ eventPublisher.publish(event);
+ } else {
+ LOG.warn("Setting a member on an entity object that may have been "
+ + "previously deleted, serviceName = " + getServiceName() + ", " + "componentName = "
+ + getServiceComponentName() + ", hostName = " + getHostName());
}
}
@Override
public MaintenanceState getMaintenanceState() {
- readLock.lock();
- try {
- return getDesiredStateEntity().getMaintenanceState();
- } finally {
- readLock.unlock();
- }
+ return getDesiredStateEntity().getMaintenanceState();
}
@Override
public void setProcesses(List<Map<String, String>> procs) {
- writeLock.lock();
- try {
- processes = Collections.unmodifiableList(procs);
- } finally {
- writeLock.unlock();
- }
+ processes = ImmutableList.copyOf(procs);
}
@Override
public List<Map<String, String>> getProcesses() {
- readLock.lock();
- try {
- return processes;
- } finally {
- readLock.unlock();
- }
+ return processes;
}
@Override
public boolean isRestartRequired() {
- readLock.lock();
- try {
- return getDesiredStateEntity().isRestartRequired();
- } finally {
- readLock.unlock();
- }
+ return getDesiredStateEntity().isRestartRequired();
}
@Override
public void setRestartRequired(boolean restartRequired) {
- writeLock.lock();
- try {
- LOG.debug("Set RestartRequired on serviceName = {} componentName = {} hostName = {} to {}",
+ LOG.debug("Set RestartRequired on serviceName = {} componentName = {} hostName = {} to {}",
getServiceName(), getServiceComponentName(), getHostName(), restartRequired);
- HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
- if (desiredStateEntity != null) {
- desiredStateEntity.setRestartRequired(restartRequired);
- saveComponentDesiredStateEntityIfPersisted();
- } else {
- LOG.warn("Setting a member on an entity object that may have been " +
- "previously deleted, serviceName = " + getServiceName() + ", " +
- "componentName = " + getServiceComponentName() +
- ", hostName = " + getHostName());
- }
-
- } finally {
- writeLock.unlock();
+ HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+ if (desiredStateEntity != null) {
+ desiredStateEntity.setRestartRequired(restartRequired);
+ hostComponentDesiredStateDAO.merge(desiredStateEntity);
+ } else {
+ LOG.warn("Setting a member on an entity object that may have been "
+ + "previously deleted, serviceName = " + getServiceName() + ", " + "componentName = "
+ + getServiceComponentName() + ", hostName = " + getHostName());
}
}
@@ -1815,30 +1554,27 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
final StackId stackId = cluster.getDesiredStackVersion();
final StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
- writeLock.lock();
- try {
- // Check if there is a Repo Version already for the version.
- // If it doesn't exist, will have to create it.
- repositoryVersion = repositoryVersionDAO.findByStackNameAndVersion(stackId.getStackName(), version);
-
- if (null == repositoryVersion) {
- repositoryVersion = createRepositoryVersion(version, stackId, stackInfo);
- }
+ // Check if there is a Repo Version already for the version.
+ // If it doesn't exist, will have to create it.
+ repositoryVersion = repositoryVersionDAO.findByStackNameAndVersion(stackId.getStackName(), version);
- final HostEntity host = hostDAO.findById(hostId);
- cluster.transitionHostVersionState(host, repositoryVersion, stackId);
- } finally {
- writeLock.unlock();
+ if (null == repositoryVersion) {
+ repositoryVersion = createRepositoryVersion(version, stackId, stackInfo);
}
+
+ final HostEntity host = hostDAO.findById(hostId);
+ cluster.transitionHostVersionState(host, repositoryVersion, stackId);
+
return repositoryVersion;
}
- // Get the cached desired state entity or load it fresh through the DAO.
+ /**
+ * Gets the desired state entity for this {@link ServiceComponentHost}.
+ *
+ * @return
+ */
private HostComponentDesiredStateEntity getDesiredStateEntity() {
- if (isPersisted()) {
- desiredStateEntity = hostComponentDesiredStateDAO.findByPK(desiredStateEntityPK);
- }
- return desiredStateEntity;
+ return hostComponentDesiredStateDAO.findByPK(desiredStateEntityPK);
}
/**
@@ -1848,11 +1584,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
* {@link ServiceComponentHost}, or {@code null} if there is none.
*/
private HostComponentStateEntity getStateEntity() {
- if (isPersisted()) {
- stateEntity = hostComponentStateDAO.findById(hostComponentStateId);
- }
-
- return stateEntity;
+ return hostComponentStateDAO.findById(hostComponentStateId);
}
// create a PK object from the given desired component state entity.
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
index dcffece..6ebd11a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
@@ -160,11 +160,11 @@ public class HeartbeatProcessorTest {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(SECONDARY_NAMENODE);
- hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1);
ActionQueue aq = new ActionQueue();
@@ -229,7 +229,7 @@ public class HeartbeatProcessorTest {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(HDFS_CLIENT);
- hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
ActionQueue aq = new ActionQueue();
@@ -294,11 +294,11 @@ public class HeartbeatProcessorTest {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(SECONDARY_NAMENODE);
- hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1);
ActionQueue aq = new ActionQueue();
@@ -378,11 +378,11 @@ public class HeartbeatProcessorTest {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(SECONDARY_NAMENODE);
- hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1);
ActionQueue aq = new ActionQueue();
@@ -462,11 +462,11 @@ public class HeartbeatProcessorTest {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(SECONDARY_NAMENODE);
- hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1);
ActionQueue aq = new ActionQueue();
@@ -591,7 +591,7 @@ public class HeartbeatProcessorTest {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
ActionQueue aq = new ActionQueue();
@@ -710,7 +710,7 @@ public class HeartbeatProcessorTest {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
ActionQueue aq = new ActionQueue();
@@ -804,7 +804,7 @@ public class HeartbeatProcessorTest {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
ActionQueue aq = new ActionQueue();
@@ -885,11 +885,11 @@ public class HeartbeatProcessorTest {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(HDFS_CLIENT);
- hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
@@ -969,11 +969,11 @@ public class HeartbeatProcessorTest {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(HDFS_CLIENT);
- hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
@@ -1089,11 +1089,11 @@ public class HeartbeatProcessorTest {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(HDFS_CLIENT);
- hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
@@ -1296,10 +1296,10 @@ public class HeartbeatProcessorTest {
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).
- addServiceComponentHost(DummyHostname1).persist();
+ addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).
- addServiceComponentHost(DummyHostname1).persist();
+ addServiceComponentHost(DummyHostname1);
ActionQueue aq = new ActionQueue();
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index cd41929..e50b8c9 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@ -250,7 +250,7 @@ public class TestHeartbeatHandler {
HeartBeatResponse resp = handler.handleHeartBeat(hb);
Assert.assertFalse(resp.hasMappedComponents());
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
serviceComponentHost1.setState(State.INIT);
@@ -274,10 +274,10 @@ public class TestHeartbeatHandler {
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).
- addServiceComponentHost(DummyHostname1).persist();
+ addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).
- addServiceComponentHost(DummyHostname1).persist();
+ addServiceComponentHost(DummyHostname1);
ActionQueue aq = new ActionQueue();
@@ -386,14 +386,14 @@ public class TestHeartbeatHandler {
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
hdfs.getServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
hdfs.getServiceComponent(NAMENODE);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(HDFS_CLIENT);
- hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
Host hostObject = clusters.getHost(DummyHostname1);
hostObject.setIPv4("ipv4");
@@ -452,15 +452,15 @@ public class TestHeartbeatHandler {
*/
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
hdfs.getServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
hdfs.getServiceComponent(NAMENODE);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(HDFS_CLIENT).setRecoveryEnabled(true);
hdfs.getServiceComponent(HDFS_CLIENT);
- hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
Host hostObject = clusters.getHost(DummyHostname1);
hostObject.setIPv4("ipv4");
@@ -781,11 +781,11 @@ public class TestHeartbeatHandler {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(SECONDARY_NAMENODE);
- hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1);
ActionQueue aq = new ActionQueue();
@@ -838,11 +838,11 @@ public class TestHeartbeatHandler {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(SECONDARY_NAMENODE);
- hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1);
ActionQueue aq = new ActionQueue();
@@ -912,11 +912,11 @@ public class TestHeartbeatHandler {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(HDFS_CLIENT);
- hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
@@ -989,9 +989,9 @@ public class TestHeartbeatHandler {
Host hostObject = clusters.getHost(DummyHostname1);
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
hdfs.getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
@@ -1070,9 +1070,9 @@ public class TestHeartbeatHandler {
Host hostObject = clusters.getHost(DummyHostname1);
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
hdfs.getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
@@ -1404,7 +1404,7 @@ public class TestHeartbeatHandler {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
ActionQueue aq = new ActionQueue();
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
index c4d735d..76ab45c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
@@ -169,11 +169,11 @@ public class TestHeartbeatMonitor {
clusters.mapHostsToCluster(hostNames, clusterName);
Service hdfs = cluster.addService(serviceName);
hdfs.addServiceComponent(Role.DATANODE.name());
- hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
+ hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1);
hdfs.addServiceComponent(Role.NAMENODE.name());
- hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
+ hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1);
hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
- hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
+ hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1);
hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
hdfs.getServiceComponent(Role.NAMENODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
@@ -262,18 +262,18 @@ public class TestHeartbeatMonitor {
Service hdfs = cluster.addService(serviceName);
hdfs.addServiceComponent(Role.DATANODE.name());
hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost
- (hostname1).persist();
+ (hostname1);
hdfs.addServiceComponent(Role.NAMENODE.name());
hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost
- (hostname1).persist();
+ (hostname1);
hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).
- addServiceComponentHost(hostname1).persist();
+ addServiceComponentHost(hostname1);
hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost
- (hostname1).persist();
+ (hostname1);
hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost
- (hostname2).persist();
+ (hostname2);
hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
hdfs.getServiceComponent(Role.NAMENODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
@@ -369,11 +369,11 @@ public class TestHeartbeatMonitor {
Service hdfs = cluster.addService(serviceName);
hdfs.addServiceComponent(Role.DATANODE.name());
- hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
+ hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1);
hdfs.addServiceComponent(Role.NAMENODE.name());
- hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
+ hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1);
hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
- hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
+ hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1);
hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
hdfs.getServiceComponent(Role.NAMENODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
@@ -451,11 +451,11 @@ public class TestHeartbeatMonitor {
Service hdfs = cluster.addService(serviceName);
hdfs.addServiceComponent(Role.DATANODE.name());
- hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
+ hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1);
hdfs.addServiceComponent(Role.NAMENODE.name());
- hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
+ hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1);
hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
- hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
+ hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1);
hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(hostname1);
@@ -571,11 +571,11 @@ public class TestHeartbeatMonitor {
Service hdfs = cluster.addService(serviceName);
hdfs.addServiceComponent(Role.DATANODE.name());
- hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
+ hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1);
hdfs.addServiceComponent(Role.NAMENODE.name());
- hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
+ hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1);
hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
- hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
+ hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1);
hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
hdfs.getServiceComponent(Role.NAMENODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
index b082145..6533e1c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
@@ -107,7 +107,7 @@ public class RecoveryConfigHelperTest {
/**
* Test cluster-env properties from a dummy cluster
- *
+ *
* @throws Exception
*/
@Test
@@ -137,7 +137,7 @@ public class RecoveryConfigHelperTest {
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
// Get the recovery configuration
RecoveryConfig recoveryConfig = recoveryConfigHelper.getRecoveryConfig(cluster.getClusterName(), DummyHostname1);
@@ -145,7 +145,7 @@ public class RecoveryConfigHelperTest {
// Install HDFS::NAMENODE to trigger a component installed event
hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
// Verify that the config is stale now
boolean isConfigStale = recoveryConfigHelper.isConfigStale(cluster.getClusterName(), DummyHostname1,
@@ -170,10 +170,10 @@ public class RecoveryConfigHelperTest {
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
// Get the recovery configuration
RecoveryConfig recoveryConfig = recoveryConfigHelper.getRecoveryConfig(cluster.getClusterName(), DummyHostname1);
@@ -205,7 +205,7 @@ public class RecoveryConfigHelperTest {
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setDesiredState(State.INSTALLED);
// Get the recovery configuration
@@ -244,10 +244,10 @@ public class RecoveryConfigHelperTest {
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
// Get the recovery configuration
RecoveryConfig recoveryConfig = recoveryConfigHelper.getRecoveryConfig(cluster.getClusterName(), DummyHostname1);
@@ -278,7 +278,7 @@ public class RecoveryConfigHelperTest {
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
// Get the recovery configuration
RecoveryConfig recoveryConfig = recoveryConfigHelper.getRecoveryConfig(cluster.getClusterName(), DummyHostname1);
@@ -321,8 +321,8 @@ public class RecoveryConfigHelperTest {
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
// Add SCH to Host1 and Host2
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost("Host1").persist();
- hdfs.getServiceComponent(DATANODE).addServiceComponentHost("Host2").persist();
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost("Host1");
+ hdfs.getServiceComponent(DATANODE).addServiceComponentHost("Host2");
// Simulate registration for Host1: Get the recovery configuration right away for Host1.
// It makes an entry for cluster name and Host1 in the timestamp dictionary.
[16/32] ambari git commit: AMBARI-18556 - Remove Unnecessary Locks
Inside Of Service Business Object Implementations (jonathanhurley)
Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
index 84f8124..f99b5ff 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
@@ -18,6 +18,31 @@
package org.apache.ambari.server.controller.internal;
+import static org.easymock.EasyMock.anyBoolean;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.createStrictMock;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.isNull;
+import static org.easymock.EasyMock.newCapture;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+
+import java.lang.reflect.Field;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.RoleCommand;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -57,31 +82,6 @@ import org.junit.Test;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder;
-import java.lang.reflect.Field;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.anyBoolean;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.isNull;
-import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
/**
* ServiceResourceProvider tests.
*/
@@ -117,9 +117,8 @@ public class ServiceResourceProviderTest {
expect(managementController.getClusters()).andReturn(clusters).anyTimes();
expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo);
- expect(managementController.getServiceFactory()).andReturn(serviceFactory);
- expect(serviceFactory.createNew(cluster, "Service100")).andReturn(service);
+ expect(cluster.addService("Service100")).andReturn(service);
expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
@@ -202,7 +201,6 @@ public class ServiceResourceProviderTest {
// set expectations
expect(managementController.getClusters()).andReturn(clusters).anyTimes();
expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
- expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
expect(managementController.getHostComponents(EasyMock.<Set<ServiceComponentHostRequest>>anyObject())).
andReturn(Collections.<ServiceComponentHostResponse>emptySet()).anyTimes();
@@ -321,7 +319,6 @@ public class ServiceResourceProviderTest {
// set expectations
expect(managementController.getClusters()).andReturn(clusters).anyTimes();
expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
- expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
expect(managementController.getHostComponents(EasyMock.<Set<ServiceComponentHostRequest>>anyObject())).
andReturn(Collections.<ServiceComponentHostResponse>emptySet()).anyTimes();
@@ -390,7 +387,6 @@ public class ServiceResourceProviderTest {
// set expectations
expect(managementController.getClusters()).andReturn(clusters).anyTimes();
expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
- expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
expect(managementController.getHostComponents(EasyMock.<Set<ServiceComponentHostRequest>>anyObject())).
andReturn(Collections.<ServiceComponentHostResponse>emptySet()).anyTimes();
@@ -458,7 +454,6 @@ public class ServiceResourceProviderTest {
// set expectations
expect(managementController.getClusters()).andReturn(clusters).anyTimes();
expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
- expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
expect(managementController.getHostComponents(EasyMock.<Set<ServiceComponentHostRequest>>anyObject())).
andReturn(Collections.<ServiceComponentHostResponse>emptySet()).anyTimes();
@@ -528,7 +523,6 @@ public class ServiceResourceProviderTest {
// set expectations
expect(managementController.getClusters()).andReturn(clusters).anyTimes();
expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
- expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
expect(managementController.getHostComponents((Set<ServiceComponentHostRequest>) anyObject())).
andReturn(Collections.<ServiceComponentHostResponse>emptySet()).anyTimes();
@@ -612,7 +606,6 @@ public class ServiceResourceProviderTest {
// set expectations
expect(managementController.getClusters()).andReturn(clusters).anyTimes();
expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
- expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
@@ -824,7 +817,7 @@ public class ServiceResourceProviderTest {
Clusters clusters = createNiceMock(Clusters.class);
Cluster cluster = createNiceMock(Cluster.class);
Service service = createNiceMock(Service.class);
-
+
String serviceName = "Service100";
// set expectations
@@ -912,7 +905,7 @@ public class ServiceResourceProviderTest {
// verify
verify(managementController, clusters, cluster, service);
- }
+ }
@Test
public void testDeleteResourcesBadComponentState() throws Exception{
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
index a0940ba..96810cf 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
@@ -180,7 +180,6 @@ public class UpgradeResourceProviderHDP22Test {
// add a single HIVE server
Service service = cluster.addService("HIVE");
service.setDesiredStackVersion(cluster.getDesiredStackVersion());
- service.persist();
ServiceComponent component = service.addServiceComponent("HIVE_SERVER");
ServiceComponentHost sch = component.addServiceComponentHost("h1");
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
index c2c8ced..0649191 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
@@ -238,7 +238,6 @@ public class UpgradeResourceProviderTest {
// add a single ZK server
Service service = cluster.addService("ZOOKEEPER");
service.setDesiredStackVersion(cluster.getDesiredStackVersion());
- service.persist();
ServiceComponent component = service.addServiceComponent("ZOOKEEPER_SERVER");
ServiceComponentHost sch = component.addServiceComponentHost("h1");
@@ -734,7 +733,6 @@ public class UpgradeResourceProviderTest {
// add additional service for the test
Service service = cluster.addService("HIVE");
service.setDesiredStackVersion(cluster.getDesiredStackVersion());
- service.persist();
ServiceComponent component = service.addServiceComponent("HIVE_SERVER");
ServiceComponentHost sch = component.addServiceComponentHost("h1");
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java
index 8efec98..baec7df 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java
@@ -178,7 +178,6 @@ public class UpgradeSummaryResourceProviderTest {
// add a single ZOOKEEPER server
Service service = cluster.addService("ZOOKEEPER");
service.setDesiredStackVersion(cluster.getDesiredStackVersion());
- service.persist();
ServiceComponent component = service.addServiceComponent("ZOOKEEPER_SERVER");
ServiceComponentHost sch = component.addServiceComponentHost("h1");
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/GeneralServiceCalculatedStateTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/GeneralServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/GeneralServiceCalculatedStateTest.java
index 710c723..6374be0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/GeneralServiceCalculatedStateTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/GeneralServiceCalculatedStateTest.java
@@ -18,30 +18,32 @@
package org.apache.ambari.server.controller.utilities.state;
- import com.google.inject.Binder;
- import com.google.inject.Guice;
- import com.google.inject.Inject;
- import com.google.inject.Injector;
- import com.google.inject.Module;
- import com.google.inject.persist.PersistService;
- import com.google.inject.util.Modules;
- import org.apache.ambari.server.actionmanager.ActionManager;
- import org.apache.ambari.server.orm.GuiceJpaInitializer;
- import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
- import org.apache.ambari.server.state.Cluster;
- import org.apache.ambari.server.state.Clusters;
- import org.apache.ambari.server.state.Service;
- import org.apache.ambari.server.state.ServiceComponent;
- import org.apache.ambari.server.state.ServiceComponentHost;
- import org.apache.ambari.server.state.StackId;
- import org.apache.ambari.server.state.State;
- import org.apache.ambari.server.topology.TopologyManager;
- import org.apache.ambari.server.utils.StageUtils;
- import org.junit.After;
- import org.junit.Before;
- import org.junit.Test;
import java.util.Map;
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.State;
+import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.ambari.server.utils.StageUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.inject.Binder;
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.Module;
+import com.google.inject.persist.PersistService;
+import com.google.inject.util.Modules;
+
public abstract class GeneralServiceCalculatedStateTest {
final protected String[] hosts = {"h1", "h2"};
@@ -76,7 +78,6 @@ public abstract class GeneralServiceCalculatedStateTest {
service = cluster.addService(getServiceName());
service.setDesiredStackVersion(cluster.getDesiredStackVersion());
- service.persist();
createComponentsAndHosts();
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
index bdbaf9b..4066d8f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
@@ -381,7 +381,6 @@ public class EventsTest {
private void installHdfsService() throws Exception {
String serviceName = "HDFS";
Service service = m_serviceFactory.createNew(m_cluster, serviceName);
- service.persist();
service = m_cluster.getService(serviceName);
Assert.assertNotNull(service);
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
index 5280ae6..1d1f77e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
@@ -431,7 +431,6 @@ public class OrmTestHelper {
ServiceComponentHostFactory schFactory, String hostName) throws Exception {
String serviceName = "HDFS";
Service service = serviceFactory.createNew(cluster, serviceName);
- service.persist();
service = cluster.getService(serviceName);
assertNotNull(service);
@@ -476,7 +475,6 @@ public class OrmTestHelper {
ServiceComponentHostFactory schFactory, String hostName) throws Exception {
String serviceName = "YARN";
Service service = serviceFactory.createNew(cluster, serviceName);
- service.persist();
service = cluster.getService(serviceName);
assertNotNull(service);
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAOTest.java
index 28adb8c..f993bec 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAOTest.java
@@ -18,22 +18,19 @@
package org.apache.ambari.server.orm.dao;
-import com.google.inject.Provider;
-import junit.framework.Assert;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.junit.Test;
-
-import javax.persistence.EntityManager;
-
-import java.util.Collection;
-import java.util.HashSet;
-
import static org.easymock.EasyMock.createNiceMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
+import javax.persistence.EntityManager;
+
+import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.junit.Test;
+
+import com.google.inject.Provider;
+
/**
* HostComponentDesiredStateDAO tests.
*/
@@ -46,19 +43,17 @@ public class HostComponentDesiredStateDAOTest {
EntityManager entityManager = createNiceMock(EntityManager.class);
HostDAO hostDAO = createNiceMock(HostDAO.class);
HostEntity hostEntity = createNiceMock(HostEntity.class);
-
+
HostComponentDesiredStateEntity hostComponentDesiredStateEntity = createNiceMock(HostComponentDesiredStateEntity.class);
- expect(hostComponentDesiredStateEntity.getHostId()).andReturn(1L).anyTimes();
expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
- expect(entityManager.merge(hostComponentDesiredStateEntity)).andReturn(hostComponentDesiredStateEntity).anyTimes();
entityManager.remove(hostComponentDesiredStateEntity);
hostEntity.removeHostComponentDesiredStateEntity(hostComponentDesiredStateEntity);
- expect(hostDAO.merge(hostEntity)).andReturn(hostEntity).anyTimes();
+ expect(hostDAO.merge(hostEntity)).andReturn(hostEntity).atLeastOnce();
- expect(hostDAO.findById(1L)).andReturn(hostEntity).anyTimes();
+ expect(hostComponentDesiredStateEntity.getHostEntity()).andReturn(hostEntity).atLeastOnce();
replay(entityManagerProvider, entityManager, hostDAO, hostEntity, hostComponentDesiredStateEntity);
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentStateDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentStateDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentStateDAOTest.java
index 7c7d0ce..5ea0b1e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentStateDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentStateDAOTest.java
@@ -18,21 +18,19 @@
package org.apache.ambari.server.orm.dao;
-import com.google.inject.Provider;
-import junit.framework.Assert;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.junit.Test;
-
-import javax.persistence.EntityManager;
-import java.util.Collection;
-import java.util.HashSet;
-
import static org.easymock.EasyMock.createNiceMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
+import javax.persistence.EntityManager;
+
+import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.junit.Test;
+
+import com.google.inject.Provider;
+
/**
* HostComponentStateDAO tests.
*/
@@ -48,22 +46,12 @@ public class HostComponentStateDAOTest {
HostComponentStateEntity hostComponentStateEntity = createNiceMock(HostComponentStateEntity.class);
- expect(hostComponentStateEntity.getHostName()).andReturn("host1");
- expect(hostDAO.findByName("host1")).andReturn(hostEntity);
expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
-
- expect(entityManager.merge(hostComponentStateEntity)).andReturn(hostComponentStateEntity).anyTimes();
entityManager.remove(hostComponentStateEntity);
-
- hostEntity.removeHostComponentStateEntity(hostComponentStateEntity);
-
- expect(hostDAO.merge(hostEntity)).andReturn(hostEntity).anyTimes();
-
replay(entityManagerProvider, entityManager, hostDAO, hostEntity, hostComponentStateEntity);
-
HostComponentStateDAO dao = new HostComponentStateDAO();
dao.entityManagerProvider = entityManagerProvider;
dao.hostDAO = hostDAO;
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
index d12adde..3a597fd 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
@@ -375,7 +375,6 @@ public class ComponentVersionCheckActionTest {
} catch (ServiceNotFoundException e) {
service = serviceFactory.createNew(cluster, serviceName);
cluster.addService(service);
- service.persist();
}
return service;
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
index 480dfb3..7ab2856 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
@@ -1741,7 +1741,6 @@ public class ConfigureActionTest {
} catch (ServiceNotFoundException e) {
service = serviceFactory.createNew(cluster, serviceName);
cluster.addService(service);
- service.persist();
}
return service;
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index d3d8b4c..30eb149 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -1020,7 +1020,6 @@ public class UpgradeActionTest {
} catch (ServiceNotFoundException e) {
service = serviceFactory.createNew(cluster, serviceName);
cluster.addService(service);
- service.persist();
}
return service;
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
index cfe5d61..ac6a5dd 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
@@ -100,7 +100,6 @@ public class ServiceComponentTest {
Service s = serviceFactory.createNew(cluster, serviceName);
cluster.addService(s);
- s.persist();
service = cluster.getService(serviceName);
Assert.assertNotNull(service);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
index 65751af..0f5c432 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
@@ -120,7 +120,6 @@ public class ServiceTest {
String serviceName = "HDFS";
Service s = serviceFactory.createNew(cluster, serviceName);
cluster.addService(s);
- s.persist();
Service service = cluster.getService(serviceName);
Assert.assertNotNull(service);
@@ -142,7 +141,6 @@ public class ServiceTest {
String serviceName = "HDFS";
Service s = serviceFactory.createNew(cluster, serviceName);
cluster.addService(s);
- s.persist();
Service service = cluster.getService(serviceName);
@@ -268,7 +266,6 @@ public class ServiceTest {
String serviceName = "HDFS";
Service s = serviceFactory.createNew(cluster, serviceName);
cluster.addService(s);
- s.persist();
Service service = cluster.getService(serviceName);
Assert.assertNotNull(service);
@@ -292,7 +289,6 @@ public class ServiceTest {
String serviceName = "HDFS";
Service s = serviceFactory.createNew(cluster, serviceName);
cluster.addService(s);
- s.persist();
Service service = cluster.getService(serviceName);
Assert.assertNotNull(service);
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
index c511f1a..015660c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
@@ -19,8 +19,6 @@ package org.apache.ambari.server.state.alerts;
import java.util.UUID;
-import junit.framework.Assert;
-
import org.apache.ambari.server.events.AlertDefinitionChangedEvent;
import org.apache.ambari.server.events.AlertDefinitionDeleteEvent;
import org.apache.ambari.server.events.AmbariEvent;
@@ -51,12 +49,14 @@ import org.apache.ambari.server.utils.EventBusSynchronizer;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.junit.experimental.categories.Category;
import com.google.gson.Gson;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.persist.PersistService;
-import org.junit.experimental.categories.Category;
+
+import junit.framework.Assert;
/**
* Tests that {@link AmbariEvent} instances are fired correctly and that alert
@@ -306,7 +306,6 @@ public class AlertEventPublisherTest {
private void installHdfsService() throws Exception {
String serviceName = "HDFS";
Service service = serviceFactory.createNew(cluster, serviceName);
- service.persist();
service = cluster.getService(serviceName);
Assert.assertNotNull(service);
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
index c7a5915..85d13a6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
@@ -41,6 +41,7 @@ import org.apache.ambari.server.utils.EventBusSynchronizer;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.junit.experimental.categories.Category;
import com.google.common.eventbus.EventBus;
import com.google.inject.Binder;
@@ -51,7 +52,6 @@ import com.google.inject.persist.PersistService;
import com.google.inject.util.Modules;
import junit.framework.Assert;
-import org.junit.experimental.categories.Category;
/**
* Tests that {@link InitialAlertEventTest} instances are fired correctly.
@@ -177,7 +177,6 @@ public class InitialAlertEventTest {
private void installHdfsService() throws Exception {
String serviceName = "HDFS";
Service service = m_serviceFactory.createNew(m_cluster, serviceName);
- service.persist();
service = m_cluster.getService(serviceName);
Assert.assertNotNull(service);
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
index ab2628b..e5eb942 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
@@ -595,7 +595,6 @@ public class ClusterDeadlockTest {
} catch (ServiceNotFoundException e) {
service = serviceFactory.createNew(cluster, serviceName);
cluster.addService(service);
- service.persist();
}
return service;
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
index ba0ff11..811d98b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
@@ -214,7 +214,6 @@ public class ClusterImplTest {
clusters.mapHostsToCluster(Sets.newHashSet(hostName1, hostName2), clusterName);
Service hdfs = cluster.addService("HDFS");
- hdfs.persist();
ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
nameNode.persist();
@@ -231,7 +230,6 @@ public class ClusterImplTest {
hdfsClient.addServiceComponentHost(hostName2).persist();
Service tez = cluster.addService(serviceToDelete);
- tez.persist();
ServiceComponent tezClient = tez.addServiceComponent("TEZ_CLIENT");
tezClient.persist();
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index 4d39f71..fed9b98 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -383,9 +383,6 @@ public class ClusterTest {
cluster.addService(s1);
cluster.addService(s2);
cluster.addService(s3);
- s1.persist();
- s2.persist();
- s3.persist();
// Add HDFS components
ServiceComponent sc1CompA = serviceComponentFactory.createNew(s1, "NAMENODE");
@@ -666,9 +663,6 @@ public class ClusterTest {
Service s1 = serviceFactory.createNew(c1, "HDFS");
Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
- s1.persist();
- s2.persist();
-
Service s = c1.getService("HDFS");
Assert.assertNotNull(s);
Assert.assertEquals("HDFS", s.getName());
@@ -696,7 +690,6 @@ public class ClusterTest {
Service s = serviceFactory.createNew(c1, "HDFS");
c1.addService(s);
- s.persist();
ServiceComponent sc = serviceComponentFactory.createNew(s, "NAMENODE");
s.addServiceComponent(sc);
sc.persist();
@@ -716,7 +709,6 @@ public class ClusterTest {
iterator.next();
Service s1 = serviceFactory.createNew(c1, "PIG");
c1.addService(s1);
- s1.persist();
ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "PIG");
s1.addServiceComponent(sc1);
sc1.persist();
@@ -738,7 +730,6 @@ public class ClusterTest {
Service s = serviceFactory.createNew(c1, "HDFS");
c1.addService(s);
- s.persist();
ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE");
s.addServiceComponent(scNN);
@@ -772,7 +763,6 @@ public class ClusterTest {
Service s = serviceFactory.createNew(c1, "HDFS");
c1.addService(s);
- s.persist();
ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE");
s.addServiceComponent(scNN);
@@ -812,7 +802,6 @@ public class ClusterTest {
Service s = serviceFactory.createNew(c1, "HDFS");
c1.addService(s);
- s.persist();
ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE");
s.addServiceComponent(scNN);
@@ -850,11 +839,9 @@ public class ClusterTest {
Service sfHDFS = serviceFactory.createNew(c1, "HDFS");
c1.addService(sfHDFS);
- sfHDFS.persist();
Service sfMR = serviceFactory.createNew(c1, "MAPREDUCE");
c1.addService(sfMR);
- sfMR.persist();
ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE");
sfHDFS.addServiceComponent(scNN);
@@ -915,11 +902,9 @@ public class ClusterTest {
Service sfHDFS = serviceFactory.createNew(c1, "HDFS");
c1.addService(sfHDFS);
- sfHDFS.persist();
Service sfMR = serviceFactory.createNew(c1, "MAPREDUCE");
c1.addService(sfMR);
- sfMR.persist();
ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE");
sfHDFS.addServiceComponent(scNN);
@@ -981,11 +966,9 @@ public class ClusterTest {
Service sfHDFS = serviceFactory.createNew(c1, "HDFS");
c1.addService(sfHDFS);
- sfHDFS.persist();
Service sfMR = serviceFactory.createNew(c1, "MAPREDUCE");
c1.addService(sfMR);
- sfMR.persist();
ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE");
sfHDFS.addServiceComponent(scNN);
@@ -1185,10 +1168,9 @@ public class ClusterTest {
public void testDeleteService() throws Exception {
createDefaultCluster();
- c1.addService("MAPREDUCE").persist();
+ c1.addService("MAPREDUCE");
Service hdfs = c1.addService("HDFS");
- hdfs.persist();
ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
nameNode.persist();
@@ -1208,7 +1190,7 @@ public class ClusterTest {
public void testDeleteServiceWithConfigHistory() throws Exception {
createDefaultCluster();
- c1.addService("HDFS").persist();
+ c1.addService("HDFS");
Config config1 = configFactory.createNew(c1, "hdfs-site",
new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
index d2d07b5..1012534 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
@@ -385,7 +385,6 @@ public class ClustersDeadlockTest {
} catch (ServiceNotFoundException e) {
service = serviceFactory.createNew(cluster, serviceName);
cluster.addService(service);
- service.persist();
}
return service;
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
index 2f2be0c..94bd6d4 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
@@ -440,7 +440,6 @@ public class ClustersTest {
host1.addDesiredConfig(cluster.getClusterId(), true, "_test", config2);
Service hdfs = cluster.addService("HDFS");
- hdfs.persist();
Assert.assertNotNull(injector.getInstance(ClusterServiceDAO.class).findByClusterAndServiceNames(c1, "HDFS"));
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
index 992b8fe..9106ec3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
@@ -224,7 +224,6 @@ public class ConcurrentServiceConfigVersionTest {
} catch (ServiceNotFoundException e) {
service = serviceFactory.createNew(cluster, serviceName);
cluster.addService(service);
- service.persist();
}
return service;
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
index 0f272f6..4c87613 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
@@ -253,7 +253,6 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
} catch (ServiceNotFoundException e) {
service = serviceFactory.createNew(cluster, serviceName);
cluster.addService(service);
- service.persist();
}
return service;
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
index 7f12eb7..080886e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
@@ -182,7 +182,6 @@ public class ServiceComponentHostTest {
+ ", serviceName=" + svc);
s = serviceFactory.createNew(c, svc);
c.addService(s);
- s.persist();
}
ServiceComponent sc = null;
[32/32] ambari git commit: Merge branch 'branch-feature-AMBARI-18456'
into trunk
Posted by jo...@apache.org.
Merge branch 'branch-feature-AMBARI-18456' into trunk
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/efe38bec
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/efe38bec
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/efe38bec
Branch: refs/heads/trunk
Commit: efe38bece39de0241c8656a4c77a733a3c878987
Parents: dedcdf9 159ad00
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Oct 19 12:06:10 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Oct 19 12:06:10 2016 -0400
----------------------------------------------------------------------
ambari-project/pom.xml | 2 +-
.../ambari/server/api/services/BaseRequest.java | 49 +-
.../controller/AmbariManagementController.java | 8 -
.../AmbariManagementControllerImpl.java | 33 +-
.../ambari/server/controller/AmbariServer.java | 11 +-
.../internal/ComponentResourceProvider.java | 26 +-
.../internal/ServiceResourceProvider.java | 38 +-
.../internal/UpgradeResourceProvider.java | 2 +-
.../alerts/AlertServiceStateListener.java | 122 +-
.../orm/dao/ConfigGroupHostMappingDAO.java | 137 +-
.../orm/dao/HostComponentDesiredStateDAO.java | 7 +-
.../server/orm/dao/HostComponentStateDAO.java | 9 +-
.../apache/ambari/server/orm/dao/HostDAO.java | 2 +-
.../orm/entities/ClusterServiceEntity.java | 2 +-
.../HostComponentDesiredStateEntity.java | 11 +
.../orm/entities/HostComponentStateEntity.java | 11 +
.../org/apache/ambari/server/state/Cluster.java | 12 -
.../apache/ambari/server/state/Clusters.java | 9 +
.../apache/ambari/server/state/ConfigImpl.java | 98 +-
.../org/apache/ambari/server/state/Host.java | 11 -
.../apache/ambari/server/state/HostConfig.java | 12 +-
.../org/apache/ambari/server/state/Service.java | 13 -
.../ambari/server/state/ServiceComponent.java | 13 -
.../server/state/ServiceComponentHost.java | 6 -
.../server/state/ServiceComponentImpl.java | 719 ++++-----
.../apache/ambari/server/state/ServiceImpl.java | 642 +++------
.../server/state/cluster/ClusterImpl.java | 1360 +++++++-----------
.../server/state/cluster/ClustersImpl.java | 628 ++++----
.../state/configgroup/ConfigGroupImpl.java | 92 +-
.../ambari/server/state/host/HostFactory.java | 2 +-
.../ambari/server/state/host/HostImpl.java | 948 ++++--------
.../svccomphost/ServiceComponentHostImpl.java | 1066 +++++---------
.../ambari/server/topology/TopologyManager.java | 1 -
.../apache/ambari/server/utils/RetryHelper.java | 19 +-
.../ExecutionCommandWrapperTest.java | 1 -
.../actionmanager/TestActionDBAccessorImpl.java | 9 -
.../server/actionmanager/TestActionManager.java | 8 +-
.../ambari/server/agent/AgentResourceTest.java | 12 +-
.../server/agent/HeartbeatProcessorTest.java | 132 +-
.../server/agent/HeartbeatTestHelper.java | 16 +-
.../server/agent/TestHeartbeatHandler.java | 136 +-
.../server/agent/TestHeartbeatMonitor.java | 87 +-
.../server/api/services/AmbariMetaInfoTest.java | 3 +
.../server/api/services/ClusterServiceTest.java | 43 +-
.../server/checks/InstallPackagesCheckTest.java | 5 +-
.../configuration/RecoveryConfigHelperTest.java | 122 +-
.../AmbariCustomCommandExecutionHelperTest.java | 1 -
.../AmbariManagementControllerTest.java | 185 +--
.../BackgroundCustomCommandExecutionTest.java | 1 -
.../server/controller/KerberosHelperTest.java | 80 +-
...hYarnCapacitySchedulerReleaseConfigTest.java | 7 +-
.../internal/ClusterResourceProviderTest.java | 7 +-
.../internal/JMXHostProviderTest.java | 6 -
.../PreUpgradeCheckResourceProviderTest.java | 52 +-
.../internal/ServiceResourceProviderTest.java | 63 +-
.../StackDefinedPropertyProviderTest.java | 1 -
.../UpgradeResourceProviderHDP22Test.java | 2 -
.../internal/UpgradeResourceProviderTest.java | 4 -
.../UpgradeSummaryResourceProviderTest.java | 48 +-
.../DefaultServiceCalculatedStateTest.java | 6 +-
.../state/FlumeServiceCalculatedStateTest.java | 7 +-
.../GeneralServiceCalculatedStateTest.java | 47 +-
.../state/HBaseServiceCalculatedStateTest.java | 7 +-
.../state/HDFSServiceCalculatedStateTest.java | 7 +-
.../state/HiveServiceCalculatedStateTest.java | 7 +-
.../state/OozieServiceCalculatedStateTest.java | 7 +-
.../state/YarnServiceCalculatedStateTest.java | 7 +-
.../apache/ambari/server/events/EventsTest.java | 5 -
.../HostVersionOutOfSyncListenerTest.java | 3 -
.../apache/ambari/server/orm/OrmTestHelper.java | 34 +-
.../server/orm/dao/ClusterVersionDAOTest.java | 12 +-
.../server/orm/dao/ConfigGroupDAOTest.java | 33 +-
.../dao/HostComponentDesiredStateDAOTest.java | 27 +-
.../orm/dao/HostComponentStateDAOTest.java | 28 +-
.../orm/dao/RepositoryVersionDAOTest.java | 6 +-
.../ambari/server/orm/dao/SettingDAOTest.java | 19 +-
.../ambari/server/orm/dao/WidgetDAOTest.java | 8 +-
.../server/orm/dao/WidgetLayoutDAOTest.java | 13 +-
.../ComponentVersionCheckActionTest.java | 5 -
.../upgrades/ConfigureActionTest.java | 2 -
.../upgrades/UpgradeActionTest.java | 8 -
.../ambari/server/state/ConfigGroupTest.java | 10 +-
.../ambari/server/state/ConfigHelperTest.java | 12 +-
.../server/state/RequestExecutionTest.java | 12 +-
.../server/state/ServiceComponentTest.java | 47 +-
.../apache/ambari/server/state/ServiceTest.java | 15 +-
.../ambari/server/state/UpgradeHelperTest.java | 8 -
.../state/alerts/AlertEventPublisherTest.java | 7 +-
.../state/alerts/InitialAlertEventTest.java | 3 +-
.../state/cluster/ClusterDeadlockTest.java | 41 +-
.../cluster/ClusterEffectiveVersionTest.java | 19 +-
.../server/state/cluster/ClusterImplTest.java | 66 +-
.../server/state/cluster/ClusterTest.java | 122 +-
.../state/cluster/ClustersDeadlockTest.java | 12 +-
.../server/state/cluster/ClustersTest.java | 17 -
.../ConcurrentServiceConfigVersionTest.java | 4 -
...omponentHostConcurrentWriteDeadlockTest.java | 4 -
.../ambari/server/state/host/HostImplTest.java | 50 +-
.../ambari/server/state/host/HostTest.java | 22 +-
.../svccomphost/ServiceComponentHostTest.java | 5 -
.../server/update/HostUpdateHelperTest.java | 40 +-
.../ambari/server/utils/StageUtilsTest.java | 3 +-
102 files changed, 2958 insertions(+), 5011 deletions(-)
----------------------------------------------------------------------
[23/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/20f29780
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/20f29780
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/20f29780
Branch: refs/heads/trunk
Commit: 20f297803161e7c927446283d1f81043d70055c8
Parents: a851d8f 9b0f631
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Oct 14 09:40:28 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Oct 14 09:40:28 2016 -0400
----------------------------------------------------------------------
.../resource_management/TestPackageResource.py | 10 +-
.../resource_management/core/exceptions.py | 14 +-
.../core/providers/package/__init__.py | 67 ++++--
.../core/providers/package/apt.py | 21 +-
.../core/providers/package/yumrpm.py | 7 +-
.../core/providers/package/zypper.py | 5 +
.../python/resource_management/core/shell.py | 5 +-
.../libraries/functions/get_user_call_output.py | 4 +-
ambari-project/pom.xml | 2 +-
.../ambari/server/checks/CheckDescription.java | 6 +-
.../server/checks/HiveNotRollingWarning.java | 74 ++++++
.../checks/HiveRollingPortChangeWarning.java | 69 ------
.../AmbariManagementControllerImpl.java | 2 +-
.../dao/ServiceComponentDesiredStateDAO.java | 23 +-
.../ServiceComponentDesiredStateEntity.java | 24 ++
.../entities/ServiceComponentVersionEntity.java | 159 +++++++++++++
.../apache/ambari/server/stack/RepoUtil.java | 3 +-
.../state/repository/VersionDefinitionXml.java | 4 +-
.../server/upgrade/SchemaUpgradeHelper.java | 1 +
.../server/upgrade/UpgradeCatalog242.java | 124 ++++++++++
.../server/upgrade/UpgradeCatalog250.java | 37 +++
.../server/upgrade/UpgradeCatalog300.java | 1 -
.../main/resources/Ambari-DDL-Derby-CREATE.sql | 15 +-
.../main/resources/Ambari-DDL-MySQL-CREATE.sql | 16 +-
.../main/resources/Ambari-DDL-Oracle-CREATE.sql | 12 +
.../resources/Ambari-DDL-Postgres-CREATE.sql | 14 +-
.../resources/Ambari-DDL-SQLAnywhere-CREATE.sql | 12 +
.../resources/Ambari-DDL-SQLServer-CREATE.sql | 14 +-
.../src/main/resources/META-INF/persistence.xml | 1 +
.../HBASE/0.96.0.2.0/package/scripts/upgrade.py | 48 +++-
.../0.12.0.2.0/package/scripts/hive_server.py | 27 +--
.../package/scripts/hive_server_upgrade.py | 10 +-
.../4.0.0.2.0/package/scripts/oozie_service.py | 2 +-
.../OOZIE/4.0.0.2.0/package/scripts/params.py | 2 +-
.../stacks/HDP/2.2/upgrades/config-upgrade.xml | 52 -----
.../stacks/HDP/2.2/upgrades/upgrade-2.2.xml | 24 --
.../stacks/HDP/2.2/upgrades/upgrade-2.3.xml | 47 ++--
.../stacks/HDP/2.2/upgrades/upgrade-2.4.xml | 37 +--
.../stacks/HDP/2.3/upgrades/config-upgrade.xml | 77 -------
.../stacks/HDP/2.3/upgrades/upgrade-2.3.xml | 24 --
.../stacks/HDP/2.3/upgrades/upgrade-2.4.xml | 21 +-
.../stacks/HDP/2.3/upgrades/upgrade-2.5.xml | 21 +-
.../stacks/HDP/2.4/upgrades/config-upgrade.xml | 26 ---
.../stacks/HDP/2.4/upgrades/upgrade-2.4.xml | 24 --
.../stacks/HDP/2.4/upgrades/upgrade-2.5.xml | 21 +-
.../stacks/HDP/2.5/upgrades/config-upgrade.xml | 31 ---
.../stacks/HDP/2.5/upgrades/upgrade-2.5.xml | 24 --
.../checks/HiveNotRollingWarningTest.java | 96 ++++++++
.../HiveRollingPortChangeWarningTest.java | 96 --------
.../server/state/ServiceComponentTest.java | 118 ++++++++++
.../server/upgrade/UpgradeCatalog242Test.java | 226 +++++++++++++++++++
.../server/upgrade/UpgradeCatalog250Test.java | 111 +++++++--
.../server/upgrade/UpgradeCatalog300Test.java | 2 -
.../stacks/2.0.6/HIVE/test_hive_server.py | 27 ++-
.../stacks/2.0.6/OOZIE/test_oozie_server.py | 10 +
55 files changed, 1257 insertions(+), 693 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/20f29780/ambari-project/pom.xml
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/20f29780/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/20f29780/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
----------------------------------------------------------------------
[13/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/78a875cf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/78a875cf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/78a875cf
Branch: refs/heads/trunk
Commit: 78a875cf2774457e769d147d2e985de20f6e64b8
Parents: 3870044 56e00ac
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Thu Oct 6 12:00:10 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Thu Oct 6 12:00:10 2016 -0400
----------------------------------------------------------------------
.../src/main/python/ambari_agent/HostCleanup.py | 32 +++++----
.../python/ambari_agent/alerts/web_alert.py | 12 +---
.../main/python/ambari_commons/inet_utils.py | 21 ++++++
.../libraries/providers/hdfs_resource.py | 2 +-
ambari-metrics/pom.xml | 4 +-
.../ambari/server/api/services/UserService.java | 2 +-
.../server/controller/StackServiceResponse.java | 16 ++++-
.../internal/StackServiceResourceProvider.java | 6 ++
.../internal/UserResourceProvider.java | 25 ++++++-
.../predicate/ComparisonPredicate.java | 16 ++++-
.../controller/predicate/EqualsPredicate.java | 15 +++++
.../server/security/authorization/User.java | 2 +-
.../server/security/authorization/Users.java | 8 +--
.../RangerKerberosConfigCalculation.java | 32 +++++----
.../ambari/server/stack/ServiceModule.java | 6 +-
.../apache/ambari/server/state/ServiceInfo.java | 27 ++++++++
.../main/resources/Ambari-DDL-MySQL-CREATE.sql | 12 ++--
.../HDFS/2.1.0.2.0/package/scripts/utils.py | 15 +----
.../src/main/resources/properties.json | 1 +
.../resources/stacks/HDP/2.6/repos/repoinfo.xml | 18 ++---
.../server/api/services/UserServiceTest.java | 71 --------------------
.../server/security/SecurityHelperImplTest.java | 4 +-
.../AmbariUserAuthenticationFilterTest.java | 2 +-
.../RangerKerberosConfigCalculationTest.java | 6 +-
.../ambari/server/stack/ServiceModuleTest.java | 19 ++++++
.../ambari/server/state/ServiceInfoTest.java | 30 +++++++++
.../server/upgrade/UpgradeCatalog240Test.java | 6 +-
pom.xml | 6 --
utility/pom.xml | 34 +++++++++-
29 files changed, 284 insertions(+), 166 deletions(-)
----------------------------------------------------------------------
[30/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/532caef3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/532caef3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/532caef3
Branch: refs/heads/trunk
Commit: 532caef33fe65d6933e9797d3db12065555bd69b
Parents: 2c60082 7ed5259
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Oct 18 15:32:08 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Oct 18 15:32:08 2016 -0400
----------------------------------------------------------------------
.../src/main/python/ambari_agent/ActionQueue.py | 26 +--
.../main/python/ambari_commons/thread_utils.py | 43 ++++
ambari-server/src/main/assemblies/server.xml | 4 +
.../controller/ServiceComponentHostRequest.java | 10 +
.../ServiceComponentHostResponse.java | 19 +-
.../internal/HostComponentResourceProvider.java | 9 +
.../internal/JsonHttpPropertyRequest.java | 6 +-
.../controller/internal/URLStreamProvider.java | 3 +
.../internal/UpgradeResourceProvider.java | 4 +-
.../ambari/server/state/ConfigHelper.java | 17 +-
.../ambari/server/state/PropertyInfo.java | 11 +
.../state/PropertyStackUpgradeBehavior.java | 57 +++++
.../server/state/ServiceComponentHost.java | 6 +
.../svccomphost/ServiceComponentHostImpl.java | 10 +-
.../src/main/resources/APACHE-AMBARI-MIB.txt | 206 ++++++++++++++++++
.../src/main/resources/configuration-schema.xsd | 5 +
.../src/main/resources/properties.json | 1 +
.../services/STORM/configuration/storm-site.xml | 1 +
.../ClientConfigResourceProviderTest.java | 6 +-
.../HostComponentResourceProviderTest.java | 10 +-
.../internal/HostResourceProviderTest.java | 26 +--
.../StackUpgradeConfigurationMergeTest.java | 4 +-
.../internal/UpgradeResourceProviderTest.java | 6 +-
.../ambari/server/state/ConfigHelperTest.java | 210 +++++++++++++------
.../app/controllers/global/update_controller.js | 3 +-
.../main/admin/stack_and_upgrade_controller.js | 22 +-
ambari-web/app/controllers/wizard.js | 12 +-
.../app/mappers/component_config_mapper.js | 1 +
ambari-web/app/mappers/hosts_mapper.js | 2 +
.../app/mappers/service_metrics_mapper.js | 1 +
ambari-web/app/mixins/common/serverValidator.js | 22 +-
ambari-web/app/models/host_component.js | 1 +
ambari-web/app/styles/config_history_flow.less | 31 ++-
.../common/configs/config_history_flow.hbs | 32 +--
.../service/info/summary/master_components.hbs | 2 +-
ambari-web/app/utils/ajax/ajax.js | 2 +-
.../views/common/configs/config_history_flow.js | 3 +-
ambari-web/pom.xml | 10 +-
.../admin/stack_and_upgrade_controller_test.js | 154 ++++++++++++--
.../test/mixins/common/serverValidator_test.js | 18 +-
contrib/alert-snmp-mib/APACHE-AMBARI-MIB.txt | 206 ------------------
contrib/alert-snmp-mib/README.md | 2 +-
.../NIFI/1.0.0/package/scripts/params.py | 2 +-
.../resources/stacks/HDF/2.0/repos/repoinfo.xml | 20 +-
44 files changed, 841 insertions(+), 405 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/532caef3/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/532caef3/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/532caef3/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index 485329b,74eaa62..c1655aa
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@@ -1233,48 -1355,60 +1238,49 @@@ public class ServiceComponentHostImpl i
@Override
public ServiceComponentHostResponse convertToResponse(Map<String, DesiredConfig> desiredConfigs) {
- clusterGlobalLock.readLock().lock();
- try {
- readLock.lock();
- try {
- HostComponentStateEntity hostComponentStateEntity = getStateEntity();
- if (null == hostComponentStateEntity) {
- LOG.warn("Could not convert ServiceComponentHostResponse to a response. It's possible that Host " + getHostName() + " was deleted.");
- return null;
- }
+ HostComponentStateEntity hostComponentStateEntity = getStateEntity();
+ if (null == hostComponentStateEntity) {
+ LOG.warn(
+ "Could not convert ServiceComponentHostResponse to a response. It's possible that Host {} was deleted.",
+ getHostName());
+ return null;
+ }
- String clusterName = serviceComponent.getClusterName();
- String serviceName = serviceComponent.getServiceName();
- String serviceComponentName = serviceComponent.getName();
- String hostName = getHostName();
- String publicHostName = getPublicHostName();
- String state = getState().toString();
- String stackId = getStackVersion().getStackId();
- String desiredState = getDesiredState().toString();
- String desiredStackId = getDesiredStackVersion().getStackId();
- HostComponentAdminState componentAdminState = getComponentAdminState();
- UpgradeState upgradeState = hostComponentStateEntity.getUpgradeState();
-
- String displayName = null;
- try {
- ComponentInfo compInfo = ambariMetaInfo.getComponent(getStackVersion().getStackName(),
- getStackVersion().getStackVersion(), serviceName, serviceComponentName);
- displayName = compInfo.getDisplayName();
- } catch (AmbariException e) {
- displayName = serviceComponentName;
- }
+ String clusterName = serviceComponent.getClusterName();
+ String serviceName = serviceComponent.getServiceName();
+ String serviceComponentName = serviceComponent.getName();
+ String hostName = getHostName();
++ String publicHostName = getPublicHostName();
+ String state = getState().toString();
+ String stackId = getStackVersion().getStackId();
+ String desiredState = getDesiredState().toString();
+ String desiredStackId = getDesiredStackVersion().getStackId();
+ HostComponentAdminState componentAdminState = getComponentAdminState();
+ UpgradeState upgradeState = hostComponentStateEntity.getUpgradeState();
- ServiceComponentHostResponse r = new ServiceComponentHostResponse(
- clusterName, serviceName,
- serviceComponentName, displayName, hostName,
- publicHostName, state,
- stackId, desiredState,
- desiredStackId, componentAdminState);
+ String displayName = null;
+ try {
+ ComponentInfo compInfo = ambariMetaInfo.getComponent(getStackVersion().getStackName(),
+ getStackVersion().getStackVersion(), serviceName, serviceComponentName);
+ displayName = compInfo.getDisplayName();
+ } catch (AmbariException e) {
+ displayName = serviceComponentName;
+ }
- r.setActualConfigs(actualConfigs);
- r.setUpgradeState(upgradeState);
+ ServiceComponentHostResponse r = new ServiceComponentHostResponse(clusterName, serviceName,
- serviceComponentName, displayName, hostName, state, stackId, desiredState, desiredStackId,
- componentAdminState);
++ serviceComponentName, displayName, hostName, publicHostName, state, stackId,
++ desiredState, desiredStackId, componentAdminState);
- try {
- r.setStaleConfig(helper.isStaleConfigs(this, desiredConfigs));
- } catch (Exception e) {
- LOG.error("Could not determine stale config", e);
- }
+ r.setActualConfigs(actualConfigs);
+ r.setUpgradeState(upgradeState);
- return r;
- } finally {
- readLock.unlock();
- }
- } finally {
- clusterGlobalLock.readLock().unlock();
+ try {
+ r.setStaleConfig(helper.isStaleConfigs(this, desiredConfigs));
+ } catch (Exception e) {
+ LOG.error("Could not determine stale config", e);
}
+
+ return r;
}
@Override
http://git-wip-us.apache.org/repos/asf/ambari/blob/532caef3/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/532caef3/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index a802a83,d014bde..d50c92d
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@@ -108,8 -116,13 +109,10 @@@ public class ConfigHelperTest
Assert.assertNotNull(cluster);
clusters.addHost("h1");
clusters.addHost("h2");
+ clusters.addHost("h3");
Assert.assertNotNull(clusters.getHost("h1"));
Assert.assertNotNull(clusters.getHost("h2"));
+ Assert.assertNotNull(clusters.getHost("h3"));
- clusters.getHost("h1").persist();
- clusters.getHost("h2").persist();
- clusters.getHost("h3").persist();
// core-site
ConfigurationRequest cr = new ConfigurationRequest();
[22/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a851d8f1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a851d8f1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a851d8f1
Branch: refs/heads/trunk
Commit: a851d8f128b6ea5f1b247a931f48f35fdc35db2a
Parents: 18c7412 2131466
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Thu Oct 13 08:31:03 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Thu Oct 13 08:31:03 2016 -0400
----------------------------------------------------------------------
ambari-server/conf/windows/ambari.properties | 1 +
.../server/actionmanager/ActionDBAccessor.java | 13 +++
.../actionmanager/ActionDBAccessorImpl.java | 14 +++
.../server/actionmanager/ActionScheduler.java | 4 +-
.../server/configuration/Configuration.java | 23 +++++
.../ambari/server/controller/AmbariServer.java | 50 +++++++++++
.../apache/ambari/server/orm/dao/StageDAO.java | 10 +++
.../ambari/server/orm/entities/StageEntity.java | 5 +-
.../0.1.0/package/scripts/metrics_grafana.py | 6 +-
.../0.1.0/package/scripts/params_linux.py | 6 ++
.../stacks/HDP/2.5/upgrades/upgrade-2.5.xml | 2 +-
.../actionmanager/TestActionScheduler.java | 12 +--
.../scheduler/ExecutionScheduleManagerTest.java | 90 ++++++++++----------
.../server/upgrade/UpgradeCatalog221Test.java | 42 +++++----
.../AMBARI_METRICS/test_metrics_grafana.py | 5 +-
.../2.0.6/configs/default_ams_embedded.json | 9 +-
ambari-web/app/controllers/installer.js | 3 +
ambari-web/app/views/wizard/step1_view.js | 8 +-
18 files changed, 223 insertions(+), 80 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/a851d8f1/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
----------------------------------------------------------------------
[25/32] ambari git commit: AMBARI-18456 - Refactor Unnecessary
In-Memory Locks Around Business Objects (jonathanhurley)
Posted by jo...@apache.org.
AMBARI-18456 - Refactor Unnecessary In-Memory Locks Around Business Objects (jonathanhurley)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d6a84715
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d6a84715
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d6a84715
Branch: refs/heads/trunk
Commit: d6a847154e80158a0be0a26ee7dfc0d6dccac686
Parents: 20f2978
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Oct 14 15:01:38 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Sun Oct 16 09:28:50 2016 -0400
----------------------------------------------------------------------
.../ambari/annotations/ExperimentalFeature.java | 8 +-
.../internal/ComponentResourceProvider.java | 26 +-
.../ambari/server/state/ServiceComponent.java | 6 -
.../server/state/ServiceComponentImpl.java | 465 +++++++------------
.../apache/ambari/server/state/ServiceImpl.java | 10 -
.../server/state/cluster/ClusterImpl.java | 5 +-
.../server/state/cluster/ClustersImpl.java | 5 +-
.../ambari/server/state/host/HostImpl.java | 4 +-
.../svccomphost/ServiceComponentHostImpl.java | 6 -
.../server/agent/HeartbeatProcessorTest.java | 54 +--
.../server/agent/TestHeartbeatHandler.java | 50 +-
.../server/agent/TestHeartbeatMonitor.java | 38 +-
.../configuration/RecoveryConfigHelperTest.java | 9 -
.../AmbariManagementControllerTest.java | 60 +--
.../apache/ambari/server/events/EventsTest.java | 1 -
.../apache/ambari/server/orm/OrmTestHelper.java | 3 -
.../ComponentVersionCheckActionTest.java | 1 -
.../upgrades/UpgradeActionTest.java | 1 -
.../server/state/ServiceComponentTest.java | 9 -
.../apache/ambari/server/state/ServiceTest.java | 6 -
.../state/cluster/ClusterDeadlockTest.java | 3 -
.../server/state/cluster/ClusterImplTest.java | 4 -
.../server/state/cluster/ClusterTest.java | 27 +-
.../state/cluster/ClustersDeadlockTest.java | 1 -
.../server/state/cluster/ClustersTest.java | 3 -
.../ConcurrentServiceConfigVersionTest.java | 1 -
...omponentHostConcurrentWriteDeadlockTest.java | 1 -
.../svccomphost/ServiceComponentHostTest.java | 1 -
28 files changed, 289 insertions(+), 519 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java b/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
index 7798f26..1d5ba0e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
+++ b/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
@@ -18,7 +18,6 @@
package org.apache.ambari.annotations;
import java.util.concurrent.Executor;
-import java.util.concurrent.locks.Lock;
/**
* The {@link ExperimentalFeature} enumeration is meant to be used with the
@@ -41,10 +40,5 @@ public enum ExperimentalFeature {
/**
* Used for code that is targeted for patch upgrades
*/
- PATCH_UPGRADES,
-
- /**
- * The removal of the cluster global {@link Lock}
- */
- CLUSTER_GLOBAL_LOCK_REMOVAL
+ PATCH_UPGRADES
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
index fe89752..241a48f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
@@ -17,9 +17,16 @@
*/
package org.apache.ambari.server.controller.internal;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-import com.google.inject.persist.Transactional;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.ClusterNotFoundException;
import org.apache.ambari.server.DuplicateResourceException;
@@ -57,15 +64,9 @@ import org.apache.ambari.server.state.State;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
+import com.google.inject.persist.Transactional;
/**
* Resource provider for component resources.
@@ -379,7 +380,6 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
}
s.addServiceComponent(sc);
- sc.persist();
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
index 8387ab8..f91a958 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
@@ -72,12 +72,6 @@ public interface ServiceComponent {
ServiceComponentResponse convertToResponse();
- void refresh();
-
- boolean isPersisted();
-
- void persist();
-
void debugDump(StringBuilder sb);
boolean isClientComponent();
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
index 282396d..f383e80 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
@@ -26,8 +26,6 @@ import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
-import org.apache.ambari.annotations.Experimental;
-import org.apache.ambari.annotations.ExperimentalFeature;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.ObjectNotFoundException;
import org.apache.ambari.server.ServiceComponentHostNotFoundException;
@@ -50,8 +48,6 @@ import org.apache.ambari.server.state.cluster.ClusterImpl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
import com.google.inject.ProvisionException;
import com.google.inject.assistedinject.Assisted;
import com.google.inject.assistedinject.AssistedInject;
@@ -68,47 +64,55 @@ public class ServiceComponentImpl implements ServiceComponent {
private final boolean isClientComponent;
private final boolean isMasterComponent;
private final boolean isVersionAdvertised;
- volatile boolean persisted = false;
- @Inject
- private ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
- @Inject
- private ClusterServiceDAO clusterServiceDAO;
- @Inject
- private HostComponentDesiredStateDAO hostComponentDesiredStateDAO;
- @Inject
- private ServiceComponentHostFactory serviceComponentHostFactory;
- @Inject
- private AmbariMetaInfo ambariMetaInfo;
- @Inject
- private AmbariEventPublisher eventPublisher;
-
- ServiceComponentDesiredStateEntity desiredStateEntity;
- private ConcurrentMap<String, ServiceComponentHost> hostComponents;
+
+ private final ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
+
+ private final ClusterServiceDAO clusterServiceDAO;
+
+ private final ServiceComponentHostFactory serviceComponentHostFactory;
+
+ private final AmbariEventPublisher eventPublisher;
+
+ private final ConcurrentMap<String, ServiceComponentHost> hostComponents = new ConcurrentHashMap<String, ServiceComponentHost>();
+
+ /**
+ * The ID of the persisted {@link ServiceComponentDesiredStateEntity}.
+ */
+ private final long desiredStateEntityId;
/**
* Data access object used for lookup up stacks.
*/
- @Inject
- private StackDAO stackDAO;
+ private final StackDAO stackDAO;
@AssistedInject
- public ServiceComponentImpl(@Assisted Service service,
- @Assisted String componentName, Injector injector) throws AmbariException {
- injector.injectMembers(this);
+ public ServiceComponentImpl(@Assisted Service service, @Assisted String componentName,
+ AmbariMetaInfo ambariMetaInfo,
+ ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO,
+ ClusterServiceDAO clusterServiceDAO, ServiceComponentHostFactory serviceComponentHostFactory,
+ StackDAO stackDAO, AmbariEventPublisher eventPublisher)
+ throws AmbariException {
+
this.service = service;
+ this.componentName = componentName;
+ this.serviceComponentDesiredStateDAO = serviceComponentDesiredStateDAO;
+ this.clusterServiceDAO = clusterServiceDAO;
+ this.serviceComponentHostFactory = serviceComponentHostFactory;
+ this.stackDAO = stackDAO;
+ this.eventPublisher = eventPublisher;
+
+ StackId stackId = service.getDesiredStackVersion();
+ StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
- desiredStateEntity = new ServiceComponentDesiredStateEntity();
+ ServiceComponentDesiredStateEntity desiredStateEntity = new ServiceComponentDesiredStateEntity();
desiredStateEntity.setComponentName(componentName);
desiredStateEntity.setDesiredState(State.INIT);
desiredStateEntity.setDesiredVersion(State.UNKNOWN.toString());
desiredStateEntity.setServiceName(service.getName());
desiredStateEntity.setClusterId(service.getClusterId());
desiredStateEntity.setRecoveryEnabled(false);
- setDesiredStackVersion(service.getDesiredStackVersion());
+ desiredStateEntity.setDesiredStack(stackEntity);
- hostComponents = new ConcurrentHashMap<String, ServiceComponentHost>();
-
- StackId stackId = service.getDesiredStackVersion();
try {
ComponentInfo compInfo = ambariMetaInfo.getComponent(stackId.getStackName(),
stackId.getStackVersion(), service.getName(), componentName);
@@ -124,17 +128,29 @@ public class ServiceComponentImpl implements ServiceComponent {
+ ", componentName=" + componentName
+ ", stackInfo=" + stackId.getStackId());
}
- this.componentName = componentName;
+
+ persistEntities(desiredStateEntity);
+ desiredStateEntityId = desiredStateEntity.getId();
}
@AssistedInject
public ServiceComponentImpl(@Assisted Service service,
- @Assisted ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity,
- Injector injector) throws AmbariException {
- injector.injectMembers(this);
+ @Assisted ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity,
+ AmbariMetaInfo ambariMetaInfo,
+ ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO,
+ ClusterServiceDAO clusterServiceDAO,
+ HostComponentDesiredStateDAO hostComponentDesiredStateDAO,
+ ServiceComponentHostFactory serviceComponentHostFactory, StackDAO stackDAO,
+ AmbariEventPublisher eventPublisher)
+ throws AmbariException {
this.service = service;
+ this.serviceComponentDesiredStateDAO = serviceComponentDesiredStateDAO;
+ this.clusterServiceDAO = clusterServiceDAO;
+ this.serviceComponentHostFactory = serviceComponentHostFactory;
+ this.stackDAO = stackDAO;
+ this.eventPublisher = eventPublisher;
- desiredStateEntity = serviceComponentDesiredStateEntity;
+ desiredStateEntityId = serviceComponentDesiredStateEntity.getId();
componentName = serviceComponentDesiredStateEntity.getComponentName();
StackId stackId = service.getDesiredStackVersion();
@@ -155,8 +171,7 @@ public class ServiceComponentImpl implements ServiceComponent {
+ ", stackInfo=" + stackId.getStackId());
}
- hostComponents = new ConcurrentHashMap<String, ServiceComponentHost>();
- for (HostComponentStateEntity hostComponentStateEntity : desiredStateEntity.getHostComponentStateEntities()) {
+ for (HostComponentStateEntity hostComponentStateEntity : serviceComponentDesiredStateEntity.getHostComponentStateEntities()) {
HostComponentDesiredStateEntityPK pk = new HostComponentDesiredStateEntityPK();
pk.setClusterId(hostComponentStateEntity.getClusterId());
pk.setServiceName(hostComponentStateEntity.getServiceName());
@@ -176,8 +191,6 @@ public class ServiceComponentImpl implements ServiceComponent {
ex.printStackTrace();
}
}
-
- persisted = true;
}
@Override
@@ -192,7 +205,9 @@ public class ServiceComponentImpl implements ServiceComponent {
*/
@Override
public boolean isRecoveryEnabled() {
- ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+ ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+ desiredStateEntityId);
+
if (desiredStateEntity != null) {
return desiredStateEntity.isRecoveryEnabled();
} else {
@@ -210,33 +225,29 @@ public class ServiceComponentImpl implements ServiceComponent {
*/
@Override
public void setRecoveryEnabled(boolean recoveryEnabled) {
- readWriteLock.writeLock().lock();
- try {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Setting RecoveryEnabled of Component" + ", clusterName="
- + service.getCluster().getClusterName() + ", clusterId="
- + service.getCluster().getClusterId() + ", serviceName="
- + service.getName() + ", componentName=" + getName()
- + ", oldRecoveryEnabled=" + isRecoveryEnabled() + ", newRecoveryEnabled="
- + recoveryEnabled);
- }
- ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
- if (desiredStateEntity != null) {
- desiredStateEntity.setRecoveryEnabled(recoveryEnabled);
- saveIfPersisted(desiredStateEntity);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Setting RecoveryEnabled of Component" + ", clusterName="
+ + service.getCluster().getClusterName() + ", clusterId="
+ + service.getCluster().getClusterId() + ", serviceName=" + service.getName()
+ + ", componentName=" + getName() + ", oldRecoveryEnabled=" + isRecoveryEnabled()
+ + ", newRecoveryEnabled=" + recoveryEnabled);
+ }
- // broadcast the change
- ServiceComponentRecoveryChangedEvent event = new ServiceComponentRecoveryChangedEvent(
- getClusterName(), getServiceName(), getName(), isRecoveryEnabled());
- eventPublisher.publish(event);
+ ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+ desiredStateEntityId);
- } else {
- LOG.warn("Setting a member on an entity object that may have been " +
- "previously deleted, serviceName = " + service.getName());
- }
+ if (desiredStateEntity != null) {
+ desiredStateEntity.setRecoveryEnabled(recoveryEnabled);
+ desiredStateEntity = serviceComponentDesiredStateDAO.merge(desiredStateEntity);
- } finally {
- readWriteLock.writeLock().unlock();
+ // broadcast the change
+ ServiceComponentRecoveryChangedEvent event = new ServiceComponentRecoveryChangedEvent(
+ getClusterName(), getServiceName(), getName(), isRecoveryEnabled());
+ eventPublisher.publish(event);
+
+ } else {
+ LOG.warn("Setting a member on an entity object that may have been "
+ + "previously deleted, serviceName = " + service.getName());
}
}
@@ -251,35 +262,27 @@ public class ServiceComponentImpl implements ServiceComponent {
}
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public Map<String, ServiceComponentHost> getServiceComponentHosts() {
return new HashMap<String, ServiceComponentHost>(hostComponents);
}
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void addServiceComponentHosts(
Map<String, ServiceComponentHost> hostComponents) throws AmbariException {
- readWriteLock.writeLock().lock();
- try {
- // TODO validation
- for (Entry<String, ServiceComponentHost> entry :
- hostComponents.entrySet()) {
- if (!entry.getKey().equals(entry.getValue().getHostName())) {
- throw new AmbariException("Invalid arguments in map"
- + ", hostname does not match the key in map");
- }
- }
- for (ServiceComponentHost sch : hostComponents.values()) {
- addServiceComponentHost(sch);
+ // TODO validation
+ for (Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
+ if (!entry.getKey().equals(entry.getValue().getHostName())) {
+ throw new AmbariException(
+ "Invalid arguments in map" + ", hostname does not match the key in map");
}
- } finally {
- readWriteLock.writeLock().unlock();
+ }
+
+ for (ServiceComponentHost sch : hostComponents.values()) {
+ addServiceComponentHost(sch);
}
}
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void addServiceComponentHost(
ServiceComponentHost hostComponent) throws AmbariException {
readWriteLock.writeLock().lock();
@@ -311,7 +314,6 @@ public class ServiceComponentImpl implements ServiceComponent {
}
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public ServiceComponentHost addServiceComponentHost(String hostName) throws AmbariException {
ServiceComponentHost hostComponent = serviceComponentHostFactory.createNew(this, hostName);
addServiceComponentHost(hostComponent);
@@ -319,23 +321,22 @@ public class ServiceComponentImpl implements ServiceComponent {
}
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public ServiceComponentHost getServiceComponentHost(String hostname)
throws AmbariException {
-
+
if (!hostComponents.containsKey(hostname)) {
throw new ServiceComponentHostNotFoundException(getClusterName(),
getServiceName(), getName(), hostname);
}
-
+
return hostComponents.get(hostname);
}
@Override
public State getDesiredState() {
- readWriteLock.readLock().lock();
- try {
- ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+ ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+ desiredStateEntityId);
+
if (desiredStateEntity != null) {
return desiredStateEntity.getDesiredState();
} else {
@@ -344,124 +345,97 @@ public class ServiceComponentImpl implements ServiceComponent {
"componentName = " + componentName);
}
- } finally {
- readWriteLock.readLock().unlock();
- }
return null;
}
@Override
public void setDesiredState(State state) {
- readWriteLock.writeLock().lock();
- try {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Setting DesiredState of Service" + ", clusterName="
- + service.getCluster().getClusterName() + ", clusterId="
- + service.getCluster().getClusterId() + ", serviceName="
- + service.getName() + ", serviceComponentName=" + getName()
- + ", oldDesiredState=" + getDesiredState() + ", newDesiredState="
- + state);
- }
- ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
- if (desiredStateEntity != null) {
- desiredStateEntity.setDesiredState(state);
- saveIfPersisted(desiredStateEntity);
- } else {
- LOG.warn("Setting a member on an entity object that may have been " +
- "previously deleted, serviceName = " + (service != null ? service.getName() : ""));
- }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Setting DesiredState of Service" + ", clusterName="
+ + service.getCluster().getClusterName() + ", clusterId="
+ + service.getCluster().getClusterId() + ", serviceName=" + service.getName()
+ + ", serviceComponentName=" + getName() + ", oldDesiredState=" + getDesiredState()
+ + ", newDesiredState=" + state);
+ }
- } finally {
- readWriteLock.writeLock().unlock();
+ ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+ desiredStateEntityId);
+
+ if (desiredStateEntity != null) {
+ desiredStateEntity.setDesiredState(state);
+ desiredStateEntity = serviceComponentDesiredStateDAO.merge(desiredStateEntity);
+ } else {
+ LOG.warn("Setting a member on an entity object that may have been "
+ + "previously deleted, serviceName = " + (service != null ? service.getName() : ""));
}
}
@Override
public StackId getDesiredStackVersion() {
- readWriteLock.readLock().lock();
- try {
- StackEntity stackEntity = getDesiredStateEntity().getDesiredStack();
- if (null != stackEntity) {
- return new StackId(stackEntity.getStackName(),
- stackEntity.getStackVersion());
- } else {
- return null;
- }
- } finally {
- readWriteLock.readLock().unlock();
+ ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+ desiredStateEntityId);
+
+ StackEntity stackEntity = desiredStateEntity.getDesiredStack();
+ if (null != stackEntity) {
+ return new StackId(stackEntity.getStackName(), stackEntity.getStackVersion());
+ } else {
+ return null;
}
}
@Override
public void setDesiredStackVersion(StackId stack) {
- readWriteLock.writeLock().lock();
- try {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Setting DesiredStackVersion of Service" + ", clusterName="
- + service.getCluster().getClusterName() + ", clusterId="
- + service.getCluster().getClusterId() + ", serviceName="
- + service.getName() + ", serviceComponentName=" + getName()
- + ", oldDesiredStackVersion=" + getDesiredStackVersion()
- + ", newDesiredStackVersion=" + stack);
- }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Setting DesiredStackVersion of Service" + ", clusterName="
+ + service.getCluster().getClusterName() + ", clusterId="
+ + service.getCluster().getClusterId() + ", serviceName=" + service.getName()
+ + ", serviceComponentName=" + getName() + ", oldDesiredStackVersion="
+ + getDesiredStackVersion() + ", newDesiredStackVersion=" + stack);
+ }
- StackEntity stackEntity = stackDAO.find(stack.getStackName(),
- stack.getStackVersion());
+ ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+ desiredStateEntityId);
- ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
- if (desiredStateEntity != null) {
- desiredStateEntity.setDesiredStack(stackEntity);
- saveIfPersisted(desiredStateEntity);
- } else {
- LOG.warn("Setting a member on an entity object that may have been " +
- "previously deleted, serviceName = " + (service != null ? service.getName() : ""));
- }
- } finally {
- readWriteLock.writeLock().unlock();
+ if (desiredStateEntity != null) {
+ StackEntity stackEntity = stackDAO.find(stack.getStackName(), stack.getStackVersion());
+ desiredStateEntity.setDesiredStack(stackEntity);
+ desiredStateEntity = serviceComponentDesiredStateDAO.merge(desiredStateEntity);
+ } else {
+ LOG.warn("Setting a member on an entity object that may have been "
+ + "previously deleted, serviceName = " + (service != null ? service.getName() : ""));
}
}
@Override
public String getDesiredVersion() {
- readWriteLock.readLock().lock();
- try {
- return getDesiredStateEntity().getDesiredVersion();
- } finally {
- readWriteLock.readLock().unlock();
- }
+ ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+ desiredStateEntityId);
+
+ return desiredStateEntity.getDesiredVersion();
}
@Override
public void setDesiredVersion(String version) {
- readWriteLock.writeLock().lock();
- try {
- ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+ ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+ desiredStateEntityId);
+
if (desiredStateEntity != null) {
desiredStateEntity.setDesiredVersion(version);
- saveIfPersisted(desiredStateEntity);
+ desiredStateEntity = serviceComponentDesiredStateDAO.merge(desiredStateEntity);
} else {
LOG.warn("Setting a member on an entity object that may have been " +
"previously deleted, serviceName = " + (service != null ? service.getName() : ""));
}
-
- } finally {
- readWriteLock.writeLock().unlock();
- }
}
@Override
public ServiceComponentResponse convertToResponse() {
- readWriteLock.readLock().lock();
- try {
- Cluster cluster = service.getCluster();
- ServiceComponentResponse r = new ServiceComponentResponse(getClusterId(),
- cluster.getClusterName(), service.getName(), getName(),
- getDesiredStackVersion().getStackId(), getDesiredState().toString(),
- getServiceComponentStateCount(), isRecoveryEnabled(), displayName);
- return r;
- } finally {
- readWriteLock.readLock().unlock();
- }
+ Cluster cluster = service.getCluster();
+ ServiceComponentResponse r = new ServiceComponentResponse(getClusterId(),
+ cluster.getClusterName(), service.getName(), getName(),
+ getDesiredStackVersion().getStackId(), getDesiredState().toString(),
+ getServiceComponentStateCount(), isRecoveryEnabled(), displayName);
+ return r;
}
@Override
@@ -472,107 +446,37 @@ public class ServiceComponentImpl implements ServiceComponent {
@Override
public void debugDump(StringBuilder sb) {
- readWriteLock.readLock().lock();
- try {
- sb.append("ServiceComponent={ serviceComponentName=" + getName()
- + ", recoveryEnabled=" + isRecoveryEnabled()
- + ", clusterName=" + service.getCluster().getClusterName()
- + ", clusterId=" + service.getCluster().getClusterId()
- + ", serviceName=" + service.getName() + ", desiredStackVersion="
- + getDesiredStackVersion() + ", desiredState="
- + getDesiredState().toString() + ", hostcomponents=[ ");
- boolean first = true;
- for (ServiceComponentHost sch : hostComponents.values()) {
- if (!first) {
- sb.append(" , ");
- first = false;
- }
- sb.append("\n ");
- sch.debugDump(sb);
- sb.append(" ");
- }
- sb.append(" ] }");
- } finally {
- readWriteLock.readLock().unlock();
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean isPersisted() {
- // a lock around this internal state variable is not required since we
- // have appropriate locks in the persist() method and this member is
- // only ever false under the condition that the object is new
- return persisted;
- }
-
- /**
- * {@inheritDoc}
- * <p/>
- * This method uses Java locks and then delegates to internal methods which
- * perform the JPA merges inside of a transaction. Because of this, a
- * transaction is not necessary before this calling this method.
- */
- @Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
- public void persist() {
- readWriteLock.writeLock().lock();
- try {
- if (!persisted) {
- // persist the new cluster topology
- persistEntities();
- refresh();
- persisted = true;
- } else {
- saveIfPersisted(desiredStateEntity);
+ sb.append("ServiceComponent={ serviceComponentName=" + getName() + ", recoveryEnabled="
+ + isRecoveryEnabled() + ", clusterName=" + service.getCluster().getClusterName()
+ + ", clusterId=" + service.getCluster().getClusterId() + ", serviceName="
+ + service.getName() + ", desiredStackVersion=" + getDesiredStackVersion()
+ + ", desiredState=" + getDesiredState().toString() + ", hostcomponents=[ ");
+ boolean first = true;
+ for (ServiceComponentHost sch : hostComponents.values()) {
+ if (!first) {
+ sb.append(" , ");
+ first = false;
}
- } finally {
- readWriteLock.writeLock().unlock();
+ sb.append("\n ");
+ sch.debugDump(sb);
+ sb.append(" ");
}
+ sb.append(" ] }");
}
@Transactional
- protected void persistEntities() {
+ protected void persistEntities(ServiceComponentDesiredStateEntity desiredStateEntity) {
ClusterServiceEntityPK pk = new ClusterServiceEntityPK();
pk.setClusterId(service.getClusterId());
pk.setServiceName(service.getName());
ClusterServiceEntity serviceEntity = clusterServiceDAO.findByPK(pk);
- ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
desiredStateEntity.setClusterServiceEntity(serviceEntity);
-
serviceComponentDesiredStateDAO.create(desiredStateEntity);
serviceEntity = clusterServiceDAO.merge(serviceEntity);
}
@Override
- @Transactional
- public void refresh() {
- readWriteLock.writeLock().lock();
- try {
- if (isPersisted()) {
- serviceComponentDesiredStateDAO.refresh(getDesiredStateEntity());
- }
- } finally {
- readWriteLock.writeLock().unlock();
- }
- }
-
- /**
- * Merges the encapsulated {@link ServiceComponentDesiredStateEntity} inside
- * of a new transaction. This method assumes that the appropriate write lock
- * has already been acquired from {@link #readWriteLock}.
- */
- @Transactional
- void saveIfPersisted(ServiceComponentDesiredStateEntity desiredStateEntity) {
- if (isPersisted()) {
- desiredStateEntity = serviceComponentDesiredStateDAO.merge(desiredStateEntity);
- }
- }
-
- @Override
public boolean isClientComponent() {
return isClientComponent;
}
@@ -589,31 +493,24 @@ public class ServiceComponentImpl implements ServiceComponent {
}
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public boolean canBeRemoved() {
- readWriteLock.readLock().lock();
- try {
- // A component can be deleted if all it's host components
- // can be removed, irrespective of the state of
- // the component itself
- for (ServiceComponentHost sch : hostComponents.values()) {
- if (!sch.canBeRemoved()) {
- LOG.warn("Found non removable hostcomponent when trying to" + " delete service component"
- + ", clusterName=" + getClusterName() + ", serviceName=" + getServiceName()
- + ", componentName=" + getName() + ", state=" + sch.getState() + ", hostname="
- + sch.getHostName());
- return false;
- }
+ // A component can be deleted if all it's host components
+ // can be removed, irrespective of the state of
+ // the component itself
+ for (ServiceComponentHost sch : hostComponents.values()) {
+ if (!sch.canBeRemoved()) {
+ LOG.warn("Found non removable hostcomponent when trying to" + " delete service component"
+ + ", clusterName=" + getClusterName() + ", serviceName=" + getServiceName()
+ + ", componentName=" + getName() + ", state=" + sch.getState() + ", hostname="
+ + sch.getHostName());
+ return false;
}
- return true;
- } finally {
- readWriteLock.readLock().unlock();
}
+ return true;
}
@Override
@Transactional
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void deleteAllServiceComponentHosts() throws AmbariException {
readWriteLock.writeLock().lock();
try {
@@ -640,7 +537,6 @@ public class ServiceComponentImpl implements ServiceComponent {
}
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void deleteServiceComponentHosts(String hostname) throws AmbariException {
readWriteLock.writeLock().lock();
try {
@@ -666,25 +562,21 @@ public class ServiceComponentImpl implements ServiceComponent {
@Override
@Transactional
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void delete() throws AmbariException {
readWriteLock.writeLock().lock();
try {
deleteAllServiceComponentHosts();
- if (persisted) {
- removeEntities();
- persisted = false;
- }
+ ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+ desiredStateEntityId);
+
+ serviceComponentDesiredStateDAO.remove(desiredStateEntity);
+
} finally {
readWriteLock.writeLock().unlock();
}
}
- @Transactional
- protected void removeEntities() throws AmbariException {
- serviceComponentDesiredStateDAO.remove(getDesiredStateEntity());
- }
private int getSCHCountByState(State state) {
int count = 0;
@@ -706,13 +598,4 @@ public class ServiceComponentImpl implements ServiceComponent {
serviceComponentStateCountMap.put("totalCount", hostComponents.size());
return serviceComponentStateCountMap;
}
-
- // Refresh cached reference after ever setter
- private ServiceComponentDesiredStateEntity getDesiredStateEntity() {
- if (!isPersisted()) {
- return desiredStateEntity;
- }
-
- return serviceComponentDesiredStateDAO.findById(desiredStateEntity.getId());
- }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
index e4adc97..9b56059 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
@@ -27,8 +27,6 @@ import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
-import org.apache.ambari.annotations.Experimental;
-import org.apache.ambari.annotations.ExperimentalFeature;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.ServiceComponentNotFoundException;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -194,7 +192,6 @@ public class ServiceImpl implements Service {
}
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void addServiceComponents(
Map<String, ServiceComponent> components) throws AmbariException {
for (ServiceComponent sc : components.values()) {
@@ -203,7 +200,6 @@ public class ServiceImpl implements Service {
}
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void addServiceComponent(ServiceComponent component) throws AmbariException {
if (components.containsKey(component.getName())) {
throw new AmbariException("Cannot add duplicate ServiceComponent"
@@ -217,7 +213,6 @@ public class ServiceImpl implements Service {
}
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public ServiceComponent addServiceComponent(String serviceComponentName)
throws AmbariException {
ServiceComponent component = serviceComponentFactory.createNew(this, serviceComponentName);
@@ -345,7 +340,6 @@ public class ServiceImpl implements Service {
/**
*
*/
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
private void persist(ClusterServiceEntity serviceEntity) {
persistEntities(serviceEntity);
refresh();
@@ -382,7 +376,6 @@ public class ServiceImpl implements Service {
}
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public boolean canBeRemoved() {
//
// A service can be deleted if all it's components
@@ -439,7 +432,6 @@ public class ServiceImpl implements Service {
@Override
@Transactional
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void deleteAllComponents() throws AmbariException {
lock.lock();
try {
@@ -465,7 +457,6 @@ public class ServiceImpl implements Service {
}
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void deleteServiceComponent(String componentName)
throws AmbariException {
lock.lock();
@@ -495,7 +486,6 @@ public class ServiceImpl implements Service {
@Override
@Transactional
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void delete() throws AmbariException {
deleteAllComponents();
deleteAllServiceConfigs();
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 848036d..5566343 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -396,7 +396,8 @@ public class ClusterImpl implements Cluster {
String hostname = svchost.getKey();
ServiceComponentHost svcHostComponent = svchost.getValue();
if (!serviceComponentHostsByHost.containsKey(hostname)) {
- serviceComponentHostsByHost.put(hostname, new ArrayList<ServiceComponentHost>());
+ serviceComponentHostsByHost.put(hostname,
+ new CopyOnWriteArrayList<ServiceComponentHost>());
}
List<ServiceComponentHost> compList = serviceComponentHostsByHost.get(hostname);
@@ -670,7 +671,7 @@ public class ClusterImpl implements Cluster {
if (!serviceComponentHostsByHost.containsKey(hostname)) {
serviceComponentHostsByHost.put(hostname,
- new ArrayList<ServiceComponentHost>());
+ new CopyOnWriteArrayList<ServiceComponentHost>());
}
if (LOG.isDebugEnabled()) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
index bed33d9..706a476 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
@@ -28,6 +28,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CopyOnWriteArrayList;
import javax.persistence.RollbackException;
@@ -413,10 +414,10 @@ public class ClustersImpl implements Clusters {
// not stored to DB
Host host = hostFactory.create(hostEntity);
host.setAgentVersion(new AgentVersion(""));
- List<DiskInfo> emptyDiskList = new ArrayList<DiskInfo>();
+ List<DiskInfo> emptyDiskList = new CopyOnWriteArrayList<DiskInfo>();
host.setDisksInfo(emptyDiskList);
host.setHealthStatus(new HostHealthStatus(HealthStatus.UNKNOWN, ""));
- host.setHostAttributes(new HashMap<String, String>());
+ host.setHostAttributes(new ConcurrentHashMap<String, String>());
host.setState(HostState.INIT);
// the hosts by ID map is updated separately since the host has not yet
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
index 286b5ca..513d924 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
@@ -18,12 +18,12 @@
package org.apache.ambari.server.state.host;
import java.lang.reflect.Type;
-import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -141,7 +141,7 @@ public class HostImpl implements Host {
private long lastHeartbeatTime = 0L;
private AgentEnv lastAgentEnv = null;
- private List<DiskInfo> disksInfo = new ArrayList<DiskInfo>();
+ private List<DiskInfo> disksInfo = new CopyOnWriteArrayList<DiskInfo>();
private RecoveryReport recoveryReport = new RecoveryReport();
private Integer currentPingPort = null;
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index a575456..a6f5040 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -30,8 +30,6 @@ import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
-import org.apache.ambari.annotations.Experimental;
-import org.apache.ambari.annotations.ExperimentalFeature;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.agent.AlertDefinitionCommand;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -1028,7 +1026,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void handleEvent(ServiceComponentHostEvent event)
throws InvalidStateTransitionException {
if (LOG.isDebugEnabled()) {
@@ -1344,7 +1341,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
}
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public ServiceComponentHostResponse convertToResponse(Map<String, DesiredConfig> desiredConfigs) {
readLock.lock();
try {
@@ -1538,7 +1534,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public boolean canBeRemoved() {
boolean schLockAcquired = false;
try {
@@ -1555,7 +1550,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
}
@Override
- @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void delete() {
boolean fireRemovalEvent = false;
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
index 64305ff..dcffece 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
@@ -159,11 +159,11 @@ public class HeartbeatProcessorTest {
public void testHeartbeatWithConfigs() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
+ hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
+ hdfs.addServiceComponent(SECONDARY_NAMENODE);
hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
ActionQueue aq = new ActionQueue();
@@ -228,7 +228,7 @@ public class HeartbeatProcessorTest {
public void testRestartRequiredAfterInstallClient() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(HDFS_CLIENT).persist();
+ hdfs.addServiceComponent(HDFS_CLIENT);
hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
ActionQueue aq = new ActionQueue();
@@ -293,11 +293,11 @@ public class HeartbeatProcessorTest {
public void testHeartbeatCustomCommandWithConfigs() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
+ hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
+ hdfs.addServiceComponent(SECONDARY_NAMENODE);
hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
ActionQueue aq = new ActionQueue();
@@ -377,11 +377,11 @@ public class HeartbeatProcessorTest {
public void testHeartbeatCustomStartStop() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
+ hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
+ hdfs.addServiceComponent(SECONDARY_NAMENODE);
hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
ActionQueue aq = new ActionQueue();
@@ -461,11 +461,11 @@ public class HeartbeatProcessorTest {
public void testStatusHeartbeat() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
+ hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
+ hdfs.addServiceComponent(SECONDARY_NAMENODE);
hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
ActionQueue aq = new ActionQueue();
@@ -590,7 +590,7 @@ public class HeartbeatProcessorTest {
throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
ActionQueue aq = new ActionQueue();
@@ -709,7 +709,7 @@ public class HeartbeatProcessorTest {
public void testUpgradeSpecificHandling() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
ActionQueue aq = new ActionQueue();
@@ -803,7 +803,7 @@ public class HeartbeatProcessorTest {
public void testCommandStatusProcesses() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
@@ -884,11 +884,11 @@ public class HeartbeatProcessorTest {
public void testComponentUpgradeCompleteReport() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
+ hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(HDFS_CLIENT).persist();
+ hdfs.addServiceComponent(HDFS_CLIENT);
hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
@@ -968,11 +968,11 @@ public class HeartbeatProcessorTest {
public void testComponentUpgradeFailReport() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
+ hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(HDFS_CLIENT).persist();
+ hdfs.addServiceComponent(HDFS_CLIENT);
hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
@@ -1088,11 +1088,11 @@ public class HeartbeatProcessorTest {
public void testComponentUpgradeInProgressReport() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
+ hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(HDFS_CLIENT).persist();
+ hdfs.addServiceComponent(HDFS_CLIENT);
hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
@@ -1294,10 +1294,10 @@ public class HeartbeatProcessorTest {
public void testComponentInProgressStatusSafeAfterStatusReport() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).
addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
+ hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).
addServiceComponentHost(DummyHostname1).persist();
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index 6205f59..cd41929 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@ -221,9 +221,9 @@ public class TestHeartbeatHandler {
public void testStatusHeartbeatWithAnnotation() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
- hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
+ hdfs.addServiceComponent(DATANODE);
+ hdfs.addServiceComponent(NAMENODE);
+ hdfs.addServiceComponent(SECONDARY_NAMENODE);
ActionQueue aq = new ActionQueue();
@@ -272,10 +272,10 @@ public class TestHeartbeatHandler {
public void testLiveStatusUpdateAfterStopFailed() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).
addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
+ hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).
addServiceComponentHost(DummyHostname1).persist();
@@ -385,14 +385,14 @@ public class TestHeartbeatHandler {
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(DATANODE).persist();
+ hdfs.getServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(NAMENODE).persist();
+ hdfs.getServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(HDFS_CLIENT).persist();
+ hdfs.addServiceComponent(HDFS_CLIENT);
hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
Host hostObject = clusters.getHost(DummyHostname1);
@@ -451,15 +451,15 @@ public class TestHeartbeatHandler {
* Add three service components enabled for auto start.
*/
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(DATANODE).persist();
+ hdfs.getServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(NAMENODE).persist();
+ hdfs.getServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(HDFS_CLIENT).setRecoveryEnabled(true);
- hdfs.getServiceComponent(HDFS_CLIENT).persist();
+ hdfs.getServiceComponent(HDFS_CLIENT);
hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
Host hostObject = clusters.getHost(DummyHostname1);
@@ -780,11 +780,11 @@ public class TestHeartbeatHandler {
public void testTaskInProgressHandling() throws Exception, InvalidStateTransitionException {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
+ hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
+ hdfs.addServiceComponent(SECONDARY_NAMENODE);
hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
ActionQueue aq = new ActionQueue();
@@ -837,11 +837,11 @@ public class TestHeartbeatHandler {
public void testOPFailedEventForAbortedTask() throws Exception, InvalidStateTransitionException {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
+ hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
+ hdfs.addServiceComponent(SECONDARY_NAMENODE);
hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
ActionQueue aq = new ActionQueue();
@@ -911,11 +911,11 @@ public class TestHeartbeatHandler {
public void testStatusHeartbeatWithVersion() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
+ hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(HDFS_CLIENT).persist();
+ hdfs.addServiceComponent(HDFS_CLIENT);
hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
@@ -988,9 +988,9 @@ public class TestHeartbeatHandler {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Host hostObject = clusters.getHost(DummyHostname1);
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
+ hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
@@ -1069,9 +1069,9 @@ public class TestHeartbeatHandler {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Host hostObject = clusters.getHost(DummyHostname1);
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
- hdfs.addServiceComponent(NAMENODE).persist();
+ hdfs.addServiceComponent(NAMENODE);
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
@@ -1403,7 +1403,7 @@ public class TestHeartbeatHandler {
public void testCommandStatusProcesses_empty() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.addServiceComponent(DATANODE).persist();
+ hdfs.addServiceComponent(DATANODE);
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
index e6a3ee6..c4d735d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
@@ -168,11 +168,11 @@ public class TestHeartbeatMonitor {
clusters.mapHostsToCluster(hostNames, clusterName);
Service hdfs = cluster.addService(serviceName);
- hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+ hdfs.addServiceComponent(Role.DATANODE.name());
hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
- hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
+ hdfs.addServiceComponent(Role.NAMENODE.name());
hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
- hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
+ hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
@@ -260,16 +260,16 @@ public class TestHeartbeatMonitor {
clusters.mapHostsToCluster(hostNames, clusterName);
Service hdfs = cluster.addService(serviceName);
- hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+ hdfs.addServiceComponent(Role.DATANODE.name());
hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost
(hostname1).persist();
- hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
+ hdfs.addServiceComponent(Role.NAMENODE.name());
hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost
(hostname1).persist();
- hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
+ hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).
addServiceComponentHost(hostname1).persist();
- hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
+ hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost
(hostname1).persist();
hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost
@@ -368,11 +368,11 @@ public class TestHeartbeatMonitor {
clusters.mapHostsToCluster(hostNames, clusterName);
Service hdfs = cluster.addService(serviceName);
- hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+ hdfs.addServiceComponent(Role.DATANODE.name());
hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
- hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
+ hdfs.addServiceComponent(Role.NAMENODE.name());
hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
- hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
+ hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
@@ -408,12 +408,12 @@ public class TestHeartbeatMonitor {
hm.start();
Thread.sleep(3 * heartbeatMonitorWakeupIntervalMS);
hm.shutdown();
-
+
int tryNumber = 0;
while(hm.isAlive()) {
hm.join(2*heartbeatMonitorWakeupIntervalMS);
tryNumber++;
-
+
if(tryNumber >= 5) {
fail("HeartbeatMonitor should be already stopped");
}
@@ -450,13 +450,13 @@ public class TestHeartbeatMonitor {
clusters.mapHostsToCluster(hostNames, clusterName);
Service hdfs = cluster.addService(serviceName);
- hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+ hdfs.addServiceComponent(Role.DATANODE.name());
hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
- hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
+ hdfs.addServiceComponent(Role.NAMENODE.name());
hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
- hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
+ hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
- hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
+ hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(hostname1);
ActionQueue aq = new ActionQueue();
@@ -570,11 +570,11 @@ public class TestHeartbeatMonitor {
Service hdfs = cluster.addService(serviceName);
- hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+ hdfs.addServiceComponent(Role.DATANODE.name());
hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
- hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
+ hdfs.addServiceComponent(Role.NAMENODE.name());
hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
- hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
+ hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
index 351d473..b082145 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
@@ -137,7 +137,6 @@ public class RecoveryConfigHelperTest {
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
// Get the recovery configuration
@@ -146,7 +145,6 @@ public class RecoveryConfigHelperTest {
// Install HDFS::NAMENODE to trigger a component installed event
hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(NAMENODE).persist();
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
// Verify that the config is stale now
@@ -172,11 +170,9 @@ public class RecoveryConfigHelperTest {
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(NAMENODE).persist();
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
// Get the recovery configuration
@@ -209,7 +205,6 @@ public class RecoveryConfigHelperTest {
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setDesiredState(State.INSTALLED);
@@ -249,11 +244,9 @@ public class RecoveryConfigHelperTest {
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(NAMENODE).persist();
hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
// Get the recovery configuration
@@ -285,7 +278,6 @@ public class RecoveryConfigHelperTest {
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
// Get the recovery configuration
@@ -327,7 +319,6 @@ public class RecoveryConfigHelperTest {
Service hdfs = cluster.addService(HDFS);
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
- hdfs.getServiceComponent(DATANODE).persist();
// Add SCH to Host1 and Host2
hdfs.getServiceComponent(DATANODE).addServiceComponentHost("Host1").persist();
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index d61a3e7..7b3f671 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -1637,13 +1637,10 @@ public class AmbariManagementControllerTest {
ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "NAMENODE");
s1.addServiceComponent(sc1);
- sc1.persist();
ServiceComponent sc2 = serviceComponentFactory.createNew(s2, "NAMENODE");
s2.addServiceComponent(sc2);
- sc2.persist();
ServiceComponent sc3 = serviceComponentFactory.createNew(s3, "NAMENODE");
s3.addServiceComponent(sc3);
- sc3.persist();
try {
@@ -2365,7 +2362,6 @@ public class AmbariManagementControllerTest {
s1.setDesiredState(State.INSTALLED);
ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
s1.addServiceComponent(sc1);
- sc1.persist();
sc1.setDesiredStackVersion(new StackId("HDP-0.1"));
sc1.setDesiredState(State.UNINSTALLED);
@@ -2443,15 +2439,6 @@ public class AmbariManagementControllerTest {
sc7.setDesiredState(State.UNINSTALLED);
sc8.setDesiredState(State.UNINSTALLED);
- sc1.persist();
- sc2.persist();
- sc3.persist();
- sc4.persist();
- sc5.persist();
- sc6.persist();
- sc7.persist();
- sc8.persist();
-
ServiceComponentRequest r = new ServiceComponentRequest(null, null,
null, null);
@@ -2529,7 +2516,6 @@ public class AmbariManagementControllerTest {
ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
s1.addServiceComponent(sc1);
sc1.setDesiredState(State.UNINSTALLED);
- sc1.persist();
ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, host1);
sc1.addServiceComponentHost(sch1);
sch1.setDesiredState(State.INSTALLED);
@@ -2953,10 +2939,6 @@ public class AmbariManagementControllerTest {
sc1.setDesiredState(State.UNINSTALLED);
sc3.setDesiredState(State.UNINSTALLED);
- sc1.persist();
- sc2.persist();
- sc3.persist();
-
ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, host1);
ServiceComponentHost sch2 = serviceComponentHostFactory.createNew(sc1, host2);
ServiceComponentHost sch3 = serviceComponentHostFactory.createNew(sc1, host3);
@@ -4260,11 +4242,11 @@ public class AmbariManagementControllerTest {
Service hdfs = cluster.addService("HDFS");
Service mapred = cluster.addService("YARN");
- hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
- hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
- hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+ hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
+ hdfs.addServiceComponent(Role.NAMENODE.name());
+ hdfs.addServiceComponent(Role.DATANODE.name());
- mapred.addServiceComponent(Role.RESOURCEMANAGER.name()).persist();
+ mapred.addServiceComponent(Role.RESOURCEMANAGER.name());
hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
@@ -4425,9 +4407,9 @@ public class AmbariManagementControllerTest {
Service hdfs = cluster.addService("HDFS");
- hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
- hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
- hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+ hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
+ hdfs.addServiceComponent(Role.NAMENODE.name());
+ hdfs.addServiceComponent(Role.DATANODE.name());
hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
@@ -4535,11 +4517,11 @@ public class AmbariManagementControllerTest {
Service hdfs = cluster.addService("HDFS");
Service hive = cluster.addService("HIVE");
- hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
- hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
- hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+ hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
+ hdfs.addServiceComponent(Role.NAMENODE.name());
+ hdfs.addServiceComponent(Role.DATANODE.name());
- hive.addServiceComponent(Role.HIVE_SERVER.name()).persist();
+ hive.addServiceComponent(Role.HIVE_SERVER.name());
hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
@@ -4819,8 +4801,8 @@ public class AmbariManagementControllerTest {
Service hdfs = cluster.addService("HDFS");
Service mapReduce = cluster.addService("MAPREDUCE");
- hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
- mapReduce.addServiceComponent(Role.MAPREDUCE_CLIENT.name()).persist();
+ hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
+ mapReduce.addServiceComponent(Role.MAPREDUCE_CLIENT.name());
hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
mapReduce.getServiceComponent(Role.MAPREDUCE_CLIENT.name()).addServiceComponentHost(host2).persist();
@@ -6498,11 +6480,11 @@ public class AmbariManagementControllerTest {
Service hdfs = cluster.addService("HDFS");
Service mapred = cluster.addService("YARN");
- hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
- hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
- hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+ hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
+ hdfs.addServiceComponent(Role.NAMENODE.name());
+ hdfs.addServiceComponent(Role.DATANODE.name());
- mapred.addServiceComponent(Role.RESOURCEMANAGER.name()).persist();
+ mapred.addServiceComponent(Role.RESOURCEMANAGER.name());
hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
@@ -6604,11 +6586,11 @@ public class AmbariManagementControllerTest {
Service hdfs = cluster.addService("HDFS");
Service mapred = cluster.addService("YARN");
- hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
- hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
- hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+ hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
+ hdfs.addServiceComponent(Role.NAMENODE.name());
+ hdfs.addServiceComponent(Role.DATANODE.name());
- mapred.addServiceComponent(Role.RESOURCEMANAGER.name()).persist();
+ mapred.addServiceComponent(Role.RESOURCEMANAGER.name());
hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
index 4066d8f..43de27f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
@@ -387,7 +387,6 @@ public class EventsTest {
ServiceComponent component = m_componentFactory.createNew(service, "DATANODE");
service.addServiceComponent(component);
component.setDesiredState(State.INSTALLED);
- component.persist();
ServiceComponentHost sch = m_schFactory.createNew(component, HOSTNAME);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
index 1d1f77e..fffda6c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
@@ -438,7 +438,6 @@ public class OrmTestHelper {
service.addServiceComponent(datanode);
datanode.setDesiredState(State.INSTALLED);
- datanode.persist();
ServiceComponentHost sch = schFactory.createNew(datanode, hostName);
@@ -454,7 +453,6 @@ public class OrmTestHelper {
service.addServiceComponent(namenode);
namenode.setDesiredState(State.INSTALLED);
- namenode.persist();
sch = schFactory.createNew(namenode, hostName);
namenode.addServiceComponentHost(sch);
@@ -483,7 +481,6 @@ public class OrmTestHelper {
service.addServiceComponent(resourceManager);
resourceManager.setDesiredState(State.INSTALLED);
- resourceManager.persist();
ServiceComponentHost sch = schFactory.createNew(resourceManager, hostName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
index 3a597fd..6d19113 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
@@ -389,7 +389,6 @@ public class ComponentVersionCheckActionTest {
serviceComponent = serviceComponentFactory.createNew(service, componentName);
service.addServiceComponent(serviceComponent);
serviceComponent.setDesiredState(State.INSTALLED);
- serviceComponent.persist();
}
return serviceComponent;
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index 30eb149..60027c2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -1034,7 +1034,6 @@ public class UpgradeActionTest {
serviceComponent = serviceComponentFactory.createNew(service, componentName);
service.addServiceComponent(serviceComponent);
serviceComponent.setDesiredState(State.INSTALLED);
- serviceComponent.persist();
}
return serviceComponent;
http://git-wip-us.apache.org/repos/asf/ambari/blob/d6a84715/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
index 8d16e3f..59bb393 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
@@ -118,7 +118,6 @@ public class ServiceComponentTest {
ServiceComponent component = serviceComponentFactory.createNew(service,
componentName);
service.addServiceComponent(component);
- component.persist();
ServiceComponent sc = service.getServiceComponent(componentName);
Assert.assertNotNull(sc);
@@ -141,7 +140,6 @@ public class ServiceComponentTest {
ServiceComponent component = serviceComponentFactory.createNew(service,
componentName);
service.addServiceComponent(component);
- component.persist();
ServiceComponent sc = service.getServiceComponent(componentName);
Assert.assertNotNull(sc);
@@ -198,7 +196,6 @@ public class ServiceComponentTest {
ServiceComponent component = serviceComponentFactory.createNew(service,
componentName);
service.addServiceComponent(component);
- component.persist();
ServiceComponent sc = service.getServiceComponent(componentName);
Assert.assertNotNull(sc);
@@ -299,7 +296,6 @@ public class ServiceComponentTest {
ServiceComponent component = serviceComponentFactory.createNew(service,
componentName);
service.addServiceComponent(component);
- component.persist();
addHostToCluster("h1", service.getCluster().getClusterName());
ServiceComponentHost sch =
@@ -376,7 +372,6 @@ public class ServiceComponentTest {
String componentName = "NAMENODE";
ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
service.addServiceComponent(component);
- component.persist();
ServiceComponent sc = service.getServiceComponent(componentName);
Assert.assertNotNull(sc);
@@ -427,7 +422,6 @@ public class ServiceComponentTest {
String componentName = "NAMENODE";
ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
service.addServiceComponent(component);
- component.persist();
ServiceComponent sc = service.getServiceComponent(componentName);
Assert.assertNotNull(sc);
@@ -495,7 +489,6 @@ public class ServiceComponentTest {
String componentName = "NAMENODE";
ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
service.addServiceComponent(component);
- component.persist();
ServiceComponent sc = service.getServiceComponent(componentName);
Assert.assertNotNull(sc);
@@ -562,7 +555,6 @@ public class ServiceComponentTest {
String componentName = "NAMENODE";
ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
service.addServiceComponent(component);
- component.persist();
ServiceComponent sc = service.getServiceComponent(componentName);
Assert.assertNotNull(sc);
@@ -613,7 +605,6 @@ public class ServiceComponentTest {
String componentName = "NAMENODE";
ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
service.addServiceComponent(component);
- component.persist();
ServiceComponent sc = service.getServiceComponent(componentName);
Assert.assertNotNull(sc);
[18/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5cf5c834
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5cf5c834
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5cf5c834
Branch: refs/heads/trunk
Commit: 5cf5c8349cef6e4605870ac59dd620db2053b02c
Parents: 0de69e1 72f1f6f
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Sat Oct 8 23:10:24 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Sat Oct 8 23:10:24 2016 -0400
----------------------------------------------------------------------
.../FALCON/0.5.0.2.1/package/scripts/falcon.py | 11 +
.../4.0.0.2.0/package/scripts/oozie_service.py | 2 +-
.../internal/UserResourceProviderDBTest.java | 280 +++++++++++++++++++
.../stacks/2.1/FALCON/test_falcon_server.py | 14 +
4 files changed, 306 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
[09/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5467ad07
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5467ad07
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5467ad07
Branch: refs/heads/trunk
Commit: 5467ad0737bdadf23e0cec752acdbdc1b250de3c
Parents: 23fbfe4 a442efb
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Oct 4 19:59:16 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Oct 4 19:59:16 2016 -0400
----------------------------------------------------------------------
.../timeline/AbstractTimelineMetricsSink.java | 75 +++---
.../availability/MetricCollectorHAHelper.java | 19 +-
.../availability/MetricCollectorHATest.java | 34 ++-
.../src/main/python/core/blacklisted_set.py | 14 ++
.../src/main/python/core/config_reader.py | 3 +-
.../src/main/python/core/emitter.py | 74 ++++--
.../server/controller/KerberosHelperImpl.java | 243 ++++++++++++++-----
.../ConfigureAmbariIdentitiesServerAction.java | 190 +++++++++++----
.../kerberos/FinalizeKerberosServerAction.java | 118 ++++++++-
.../kerberos/KerberosOperationHandler.java | 2 +-
.../server/upgrade/UpgradeCatalog250.java | 30 ++-
.../ambari/server/utils/ShellCommandUtil.java | 121 +++++++++
.../resources/stacks/HDP/2.0.6/kerberos.json | 14 +-
.../server/controller/KerberosHelperTest.java | 193 ++++++++++++---
...nfigureAmbariIdentitiesServerActionTest.java | 204 ++++++++++++++++
.../server/upgrade/UpgradeCatalog250Test.java | 61 +++++
ambari-web/app/assets/test/tests.js | 1 +
.../nameNode/step2_controller.js | 32 ++-
.../main/admin/stack_and_upgrade_controller.js | 45 +++-
.../app/mappers/repository_version_mapper.js | 28 ++-
ambari-web/app/messages.js | 1 +
.../models/stack_version/repository_version.js | 6 +
.../admin/highAvailability/nameNode/step3.hbs | 10 +-
.../admin/stack_upgrade/upgrade_version_box.hbs | 2 +-
.../stack_upgrade/upgrade_version_column.hbs | 2 +-
ambari-web/app/utils/ajax/ajax.js | 5 +
.../stack_upgrade/upgrade_version_box_view.js | 6 +
.../upgrade_version_column_view.js | 6 +-
.../admin/stack_and_upgrade_controller_test.js | 74 +++++-
.../mappers/repository_version_mapper_test.js | 46 ++++
.../upgrade_version_box_view_test.js | 23 +-
31 files changed, 1447 insertions(+), 235 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/5467ad07/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
index 6ac607d,b2eb738..fd5afcd
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
@@@ -18,39 -18,10 +18,42 @@@
package org.apache.ambari.server.controller;
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import junit.framework.Assert;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyObject;
++import static org.easymock.EasyMock.anyString;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.getCurrentArguments;
+import static org.easymock.EasyMock.isNull;
+import static org.easymock.EasyMock.newCapture;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
++import java.io.File;
++import java.lang.reflect.Method;
+import java.net.InetAddress;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import javax.persistence.EntityManager;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.Role;
import org.apache.ambari.server.actionmanager.ActionManager;
@@@ -121,14 -96,45 +127,16 @@@ import org.easymock.CaptureType
import org.easymock.EasyMockSupport;
import org.easymock.IAnswer;
import org.junit.After;
++import org.junit.Assert;
import org.junit.Before;
+ import org.junit.Rule;
import org.junit.Test;
+ import org.junit.rules.TemporaryFolder;
-import javax.persistence.EntityManager;
-import java.io.File;
-import java.lang.reflect.Method;
-import java.net.InetAddress;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
- import junit.framework.Assert;
-import static org.easymock.EasyMock.anyLong;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.getCurrentArguments;
-import static org.easymock.EasyMock.isNull;
-import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
@SuppressWarnings("unchecked")
public class KerberosHelperTest extends EasyMockSupport {
[08/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/23fbfe48
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/23fbfe48
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/23fbfe48
Branch: refs/heads/trunk
Commit: 23fbfe48021fdd739ecc882698bb70fcadb8423b
Parents: aee49b6 98ba7e0
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Oct 4 09:42:38 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Oct 4 09:42:38 2016 -0400
----------------------------------------------------------------------
ambari-logsearch/pom.xml | 13 +++
.../ambari-metrics-timelineservice/pom.xml | 9 ++
ambari-metrics/pom.xml | 26 +++++-
ambari-server/pom.xml | 11 +++
.../src/main/python/ambari_server/utils.py | 42 ++++++++-
.../src/main/python/ambari_server_main.py | 12 ++-
.../HIVE/configuration/hive-interactive-env.xml | 11 +++
.../stacks/HDP/2.5/services/stack_advisor.py | 23 ++++-
.../alerts/AggregateAlertListenerTest.java | 2 +
.../alerts/AlertDefinitionEqualityTest.java | 2 +
.../state/alerts/AlertDefinitionHashTest.java | 2 +
.../state/alerts/AlertEventPublisherTest.java | 2 +
.../state/alerts/AlertReceivedListenerTest.java | 2 +
.../alerts/AlertStateChangedEventTest.java | 2 +
.../state/alerts/InitialAlertEventTest.java | 2 +
.../KerberosComponentDescriptorTest.java | 2 +
.../KerberosConfigurationDescriptorTest.java | 2 +
.../state/kerberos/KerberosDescriptorTest.java | 2 +
.../KerberosDescriptorUpdateHelperTest.java | 2 +
.../KerberosIdentityDescriptorTest.java | 2 +
.../kerberos/KerberosKeytabDescriptorTest.java | 2 +
.../KerberosPrincipalDescriptorTest.java | 2 +
.../kerberos/KerberosServiceDescriptorTest.java | 2 +
.../kerberos/VariableReplacementHelperTest.java | 2 +
.../state/stack/ConfigUpgradePackTest.java | 2 +
.../state/stack/ConfigUpgradeValidityTest.java | 2 +
.../ambari/server/state/stack/OSFamilyTest.java | 3 +
.../server/state/stack/UpgradePackTest.java | 2 +
ambari-server/src/test/python/TestUtils.py | 12 ++-
pom.xml | 92 +++++++++++++++++++-
utility/pom.xml | 19 ++++
utility/src/main/java/category/AlertTest.java | 27 ++++++
.../main/java/category/AmbariUpgradeTest.java | 27 ++++++
.../src/main/java/category/BlueprintTest.java | 27 ++++++
utility/src/main/java/category/FastTest.java | 27 ++++++
.../src/main/java/category/KerberosTest.java | 27 ++++++
utility/src/main/java/category/MetricsTest.java | 27 ++++++
utility/src/main/java/category/SlowTest.java | 27 ++++++
.../main/java/category/StackUpgradeTest.java | 27 ++++++
39 files changed, 514 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
[17/32] ambari git commit: AMBARI-18556 - Remove Unnecessary Locks
Inside Of Service Business Object Implementations (jonathanhurley)
Posted by jo...@apache.org.
AMBARI-18556 - Remove Unnecessary Locks Inside Of Service Business Object Implementations (jonathanhurley)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0de69e10
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0de69e10
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0de69e10
Branch: refs/heads/trunk
Commit: 0de69e10b30a1dadf6f508170548cd347095193a
Parents: fbfcf98
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Oct 7 15:59:42 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Sat Oct 8 21:19:35 2016 -0400
----------------------------------------------------------------------
.../ambari/server/api/services/BaseRequest.java | 49 +-
.../controller/AmbariManagementController.java | 8 -
.../AmbariManagementControllerImpl.java | 5 -
.../internal/ServiceResourceProvider.java | 38 +-
.../orm/dao/HostComponentDesiredStateDAO.java | 7 +-
.../server/orm/dao/HostComponentStateDAO.java | 9 +-
.../apache/ambari/server/orm/dao/HostDAO.java | 2 +-
.../orm/entities/ClusterServiceEntity.java | 2 +-
.../HostComponentDesiredStateEntity.java | 11 +
.../orm/entities/HostComponentStateEntity.java | 11 +
.../org/apache/ambari/server/state/Host.java | 2 -
.../org/apache/ambari/server/state/Service.java | 6 -
.../apache/ambari/server/state/ServiceImpl.java | 448 +++++++------------
.../server/state/cluster/ClustersImpl.java | 4 -
.../ambari/server/state/host/HostImpl.java | 15 -
.../svccomphost/ServiceComponentHostImpl.java | 18 +-
.../apache/ambari/server/utils/RetryHelper.java | 1 +
.../server/agent/HeartbeatProcessorTest.java | 12 -
.../server/agent/TestHeartbeatHandler.java | 10 -
.../server/agent/TestHeartbeatMonitor.java | 5 -
.../server/api/services/AmbariMetaInfoTest.java | 3 +
.../configuration/RecoveryConfigHelperTest.java | 6 -
.../AmbariManagementControllerTest.java | 43 --
.../PreUpgradeCheckResourceProviderTest.java | 52 +--
.../internal/ServiceResourceProviderTest.java | 63 ++-
.../UpgradeResourceProviderHDP22Test.java | 1 -
.../internal/UpgradeResourceProviderTest.java | 2 -
.../UpgradeSummaryResourceProviderTest.java | 1 -
.../GeneralServiceCalculatedStateTest.java | 47 +-
.../apache/ambari/server/events/EventsTest.java | 1 -
.../apache/ambari/server/orm/OrmTestHelper.java | 2 -
.../dao/HostComponentDesiredStateDAOTest.java | 27 +-
.../orm/dao/HostComponentStateDAOTest.java | 28 +-
.../ComponentVersionCheckActionTest.java | 1 -
.../upgrades/ConfigureActionTest.java | 1 -
.../upgrades/UpgradeActionTest.java | 1 -
.../server/state/ServiceComponentTest.java | 1 -
.../apache/ambari/server/state/ServiceTest.java | 4 -
.../state/alerts/AlertEventPublisherTest.java | 7 +-
.../state/alerts/InitialAlertEventTest.java | 3 +-
.../state/cluster/ClusterDeadlockTest.java | 1 -
.../server/state/cluster/ClusterImplTest.java | 2 -
.../server/state/cluster/ClusterTest.java | 22 +-
.../state/cluster/ClustersDeadlockTest.java | 1 -
.../server/state/cluster/ClustersTest.java | 1 -
.../ConcurrentServiceConfigVersionTest.java | 1 -
...omponentHostConcurrentWriteDeadlockTest.java | 1 -
.../svccomphost/ServiceComponentHostTest.java | 1 -
48 files changed, 340 insertions(+), 647 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java
index 9f7b949..73af2c8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java
@@ -18,40 +18,39 @@
package org.apache.ambari.server.api.services;
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.UriInfo;
+
import org.apache.ambari.server.api.handlers.RequestHandler;
import org.apache.ambari.server.api.predicate.InvalidQueryException;
import org.apache.ambari.server.api.predicate.PredicateCompiler;
import org.apache.ambari.server.api.predicate.QueryLexer;
import org.apache.ambari.server.api.query.render.Renderer;
import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.internal.SortRequestImpl;
import org.apache.ambari.server.controller.internal.PageRequestImpl;
+import org.apache.ambari.server.controller.internal.SortRequestImpl;
import org.apache.ambari.server.controller.internal.TemporalInfoImpl;
-import org.apache.ambari.server.controller.spi.SortRequest;
import org.apache.ambari.server.controller.spi.PageRequest;
import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.SortRequest;
import org.apache.ambari.server.controller.spi.SortRequestProperty;
import org.apache.ambari.server.controller.spi.TemporalInfo;
import org.apache.ambari.server.utils.RequestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.springframework.web.context.request.RequestContextHolder;
-import org.springframework.web.context.request.ServletRequestAttributes;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
-import java.io.UnsupportedEncodingException;
-import java.net.URLDecoder;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.Collection;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
/**
* Request implementation.
@@ -144,11 +143,13 @@ public abstract class BaseRequest implements Request {
parseQueryPredicate();
result = getRequestHandler().handleRequest(this);
} catch (InvalidQueryException e) {
- result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST,
- "Unable to compile query predicate: " + e.getMessage()));
+ String message = "Unable to compile query predicate: " + e.getMessage();
+ LOG.error(message, e);
+ result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, message));
} catch (IllegalArgumentException e) {
- result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST,
- "Invalid Request: " + e.getMessage()));
+ String message = "Invalid Request: " + e.getMessage();
+ LOG.error(message, e);
+ result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, message));
}
if (! result.getStatus().isErrorState()) {
@@ -322,7 +323,7 @@ public abstract class BaseRequest implements Request {
if (queryString != null) {
try {
Collection<String> ignoredProperties = null;
- switch (this.getRequestType()) {
+ switch (getRequestType()) {
case PUT:
ignoredProperties = m_resource.getResourceDefinition().getUpdateDirectives();
break;
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
index 746bca4..7418c1b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
@@ -54,7 +54,6 @@ import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceComponent;
import org.apache.ambari.server.state.ServiceComponentFactory;
import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.ServiceFactory;
import org.apache.ambari.server.state.ServiceInfo;
import org.apache.ambari.server.state.ServiceOsSpecific;
import org.apache.ambari.server.state.State;
@@ -550,13 +549,6 @@ public interface AmbariManagementController {
AmbariMetaInfo getAmbariMetaInfo();
/**
- * Get the service factory for this management controller.
- *
- * @return the service factory
- */
- ServiceFactory getServiceFactory();
-
- /**
* Get the service component factory for this management controller.
*
* @return the service component factory
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index ac680a5..9390a45 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -4705,11 +4705,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
@Override
- public ServiceFactory getServiceFactory() {
- return serviceFactory;
- }
-
- @Override
public ServiceComponentFactory getServiceComponentFactory() {
return serviceComponentFactory;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
index 56196c1..13f822e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
@@ -17,9 +17,18 @@
*/
package org.apache.ambari.server.controller.internal;
-import com.google.inject.Inject;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.ClusterNotFoundException;
import org.apache.ambari.server.DuplicateResourceException;
@@ -60,23 +69,14 @@ import org.apache.ambari.server.state.MaintenanceState;
import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceComponent;
import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.ServiceFactory;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.State;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import com.google.inject.Inject;
+import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
/**
* Resource provider for service resources.
@@ -344,18 +344,12 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
// do all validation checks
validateCreateRequests(requests, clusters);
- ServiceFactory serviceFactory = getManagementController().getServiceFactory();
for (ServiceRequest request : requests) {
Cluster cluster = clusters.getCluster(request.getClusterName());
- State state = State.INIT;
-
// Already checked that service does not exist
- Service s = serviceFactory.createNew(cluster, request.getServiceName());
+ Service s = cluster.addService(request.getServiceName());
- s.setDesiredState(state);
- s.setDesiredStackVersion(cluster.getDesiredStackVersion());
- s.persist();
// Initialize service widgets
getManagementController().initializeWidgetsAndLayouts(cluster, s);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java
index 176e15b..876b1cf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java
@@ -95,17 +95,16 @@ public class HostComponentDesiredStateDAO {
@Transactional
public void remove(HostComponentDesiredStateEntity hostComponentDesiredStateEntity) {
- HostEntity hostEntity = hostDAO.findById(hostComponentDesiredStateEntity.getHostId());
+ HostEntity hostEntity = hostComponentDesiredStateEntity.getHostEntity();
if (hostEntity == null) {
throw new IllegalStateException(String.format("Missing hostEntity for host id %1d",
hostComponentDesiredStateEntity.getHostId()));
}
- entityManagerProvider.get().remove(merge(hostComponentDesiredStateEntity));
-
- // Make sure that the state entity is removed from its host entity
hostEntity.removeHostComponentDesiredStateEntity(hostComponentDesiredStateEntity);
+
+ entityManagerProvider.get().remove(hostComponentDesiredStateEntity);
hostDAO.merge(hostEntity);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java
index 2eefe09..cc7b503 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java
@@ -27,7 +27,6 @@ import javax.persistence.TypedQuery;
import org.apache.ambari.server.orm.RequiresSession;
import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-import org.apache.ambari.server.orm.entities.HostEntity;
import org.apache.ambari.server.state.UpgradeState;
import com.google.inject.Inject;
@@ -177,13 +176,7 @@ public class HostComponentStateDAO {
@Transactional
public void remove(HostComponentStateEntity hostComponentStateEntity) {
- HostEntity hostEntity = hostDAO.findByName(hostComponentStateEntity.getHostName());
-
- entityManagerProvider.get().remove(merge(hostComponentStateEntity));
-
- // Make sure that the state entity is removed from its host entity
- hostEntity.removeHostComponentStateEntity(hostComponentStateEntity);
- hostDAO.merge(hostEntity);
+ entityManagerProvider.get().remove(hostComponentStateEntity);
}
/**
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostDAO.java
index d367eb3..0d20fd3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostDAO.java
@@ -112,7 +112,7 @@ public class HostDAO {
@Transactional
public void remove(HostEntity hostEntity) {
- entityManagerProvider.get().remove(merge(hostEntity));
+ entityManagerProvider.get().remove(hostEntity);
}
public List<String> getHostNamesByHostIds(List<Long> hostIds) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java
index 320c1be..5c76356 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java
@@ -49,7 +49,7 @@ public class ClusterServiceEntity {
@JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false)
private ClusterEntity clusterEntity;
- @OneToOne(mappedBy = "clusterServiceEntity")
+ @OneToOne(mappedBy = "clusterServiceEntity", cascade = { CascadeType.PERSIST, CascadeType.MERGE })
private ServiceDesiredStateEntity serviceDesiredStateEntity;
@OneToMany(mappedBy = "clusterServiceEntity")
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
index fd15200..274a1e0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
@@ -37,6 +37,8 @@ import org.apache.ambari.server.state.MaintenanceState;
import org.apache.ambari.server.state.SecurityState;
import org.apache.ambari.server.state.State;
+import com.google.common.base.Objects;
+
@javax.persistence.IdClass(HostComponentDesiredStateEntityPK.class)
@javax.persistence.Table(name = "hostcomponentdesiredstate")
@Entity
@@ -255,4 +257,13 @@ public class HostComponentDesiredStateEntity {
public void setRestartRequired(boolean restartRequired) {
this.restartRequired = (restartRequired == false ? 0 : 1);
}
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public String toString() {
+ return Objects.toStringHelper(this).add("serviceName", serviceName).add("componentName",
+ componentName).add("hostId", hostId).add("desiredState", desiredState).toString();
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java
index 1555321..9d35e2a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java
@@ -38,6 +38,8 @@ import org.apache.ambari.server.state.SecurityState;
import org.apache.ambari.server.state.State;
import org.apache.ambari.server.state.UpgradeState;
+import com.google.common.base.Objects;
+
@Entity
@Table(name = "hostcomponentstate")
@TableGenerator(
@@ -283,4 +285,13 @@ public class HostComponentStateEntity {
this.hostEntity = hostEntity;
}
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public String toString() {
+ return Objects.toStringHelper(this).add("serviceName", serviceName).add("componentName",
+ componentName).add("hostId", hostId).add("state", currentState).toString();
+ }
+
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
index 0a9c82a..bd6cc0d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
@@ -333,8 +333,6 @@ public interface Host extends Comparable {
HostResponse convertToResponse();
- void refresh();
-
void importHostInfo(HostInfo hostInfo);
/**
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
index 48ab252..df3cfd8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
@@ -72,12 +72,6 @@ public interface Service {
void debugDump(StringBuilder sb);
- boolean isPersisted();
-
- void persist();
-
- void refresh();
-
ServiceComponent addServiceComponent(String serviceComponentName)
throws AmbariException;
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
index 36d4902..e4adc97 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
@@ -22,8 +22,10 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
import org.apache.ambari.annotations.Experimental;
import org.apache.ambari.annotations.ExperimentalFeature;
@@ -37,7 +39,6 @@ import org.apache.ambari.server.events.ServiceRemovedEvent;
import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
import org.apache.ambari.server.orm.dao.ClusterDAO;
import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
-import org.apache.ambari.server.orm.dao.ConfigGroupDAO;
import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
import org.apache.ambari.server.orm.dao.ServiceDesiredStateDAO;
import org.apache.ambari.server.orm.dao.StackDAO;
@@ -55,7 +56,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Inject;
-import com.google.inject.Injector;
import com.google.inject.ProvisionException;
import com.google.inject.assistedinject.Assisted;
import com.google.inject.assistedinject.AssistedInject;
@@ -63,96 +63,98 @@ import com.google.inject.persist.Transactional;
public class ServiceImpl implements Service {
- private ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
- // Cached entity has only 1 getter for name
- private ClusterServiceEntity serviceEntity;
- private ServiceDesiredStateEntity serviceDesiredStateEntity;
+ private final Lock lock = new ReentrantLock();
private ServiceDesiredStateEntityPK serviceDesiredStateEntityPK;
private ClusterServiceEntityPK serviceEntityPK;
private static final Logger LOG = LoggerFactory.getLogger(ServiceImpl.class);
- private volatile boolean persisted = false;
private final Cluster cluster;
- private Map<String, ServiceComponent> components;
+ private final ConcurrentMap<String, ServiceComponent> components = new ConcurrentHashMap<>();
private final boolean isClientOnlyService;
@Inject
private ServiceConfigDAO serviceConfigDAO;
- @Inject
- private ClusterServiceDAO clusterServiceDAO;
- @Inject
- private ServiceDesiredStateDAO serviceDesiredStateDAO;
- @Inject
- private ClusterDAO clusterDAO;
- @Inject
- private ServiceComponentFactory serviceComponentFactory;
- @Inject
- private AmbariMetaInfo ambariMetaInfo;
- @Inject
- private ConfigGroupDAO configGroupDAO;
+
+ private final ClusterServiceDAO clusterServiceDAO;
+ private final ServiceDesiredStateDAO serviceDesiredStateDAO;
+ private final ClusterDAO clusterDAO;
+ private final ServiceComponentFactory serviceComponentFactory;
/**
* Data access object for retrieving stack instances.
*/
- @Inject
- private StackDAO stackDAO;
+ private final StackDAO stackDAO;
/**
* Used to publish events relating to service CRUD operations.
*/
- @Inject
- private AmbariEventPublisher eventPublisher;
+ private final AmbariEventPublisher eventPublisher;
- private void init() {
- // TODO load from DB during restart?
- }
+ /**
+ * The name of the service.
+ */
+ private final String serviceName;
@AssistedInject
- public ServiceImpl(@Assisted Cluster cluster, @Assisted String serviceName,
- Injector injector) throws AmbariException {
- injector.injectMembers(this);
- serviceEntity = new ClusterServiceEntity();
+ ServiceImpl(@Assisted Cluster cluster, @Assisted String serviceName, ClusterDAO clusterDAO,
+ ClusterServiceDAO clusterServiceDAO, ServiceDesiredStateDAO serviceDesiredStateDAO,
+ ServiceComponentFactory serviceComponentFactory, StackDAO stackDAO,
+ AmbariMetaInfo ambariMetaInfo, AmbariEventPublisher eventPublisher)
+ throws AmbariException {
+ this.cluster = cluster;
+ this.clusterDAO = clusterDAO;
+ this.clusterServiceDAO = clusterServiceDAO;
+ this.serviceDesiredStateDAO = serviceDesiredStateDAO;
+ this.serviceComponentFactory = serviceComponentFactory;
+ this.stackDAO = stackDAO;
+ this.eventPublisher = eventPublisher;
+ this.serviceName = serviceName;
+
+ ClusterServiceEntity serviceEntity = new ClusterServiceEntity();
serviceEntity.setClusterId(cluster.getClusterId());
serviceEntity.setServiceName(serviceName);
- serviceDesiredStateEntity = new ServiceDesiredStateEntity();
+ ServiceDesiredStateEntity serviceDesiredStateEntity = new ServiceDesiredStateEntity();
serviceDesiredStateEntity.setServiceName(serviceName);
serviceDesiredStateEntity.setClusterId(cluster.getClusterId());
-
serviceDesiredStateEntityPK = getServiceDesiredStateEntityPK(serviceDesiredStateEntity);
serviceEntityPK = getServiceEntityPK(serviceEntity);
serviceDesiredStateEntity.setClusterServiceEntity(serviceEntity);
serviceEntity.setServiceDesiredStateEntity(serviceDesiredStateEntity);
- this.cluster = cluster;
-
- components = new HashMap<String, ServiceComponent>();
-
StackId stackId = cluster.getDesiredStackVersion();
- setDesiredStackVersion(stackId);
+ StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
+ serviceDesiredStateEntity.setDesiredStack(stackEntity);
ServiceInfo sInfo = ambariMetaInfo.getService(stackId.getStackName(),
stackId.getStackVersion(), serviceName);
+
isClientOnlyService = sInfo.isClientOnlyService();
- init();
+ persist(serviceEntity);
}
@AssistedInject
- public ServiceImpl(@Assisted Cluster cluster, @Assisted ClusterServiceEntity
- serviceEntity, Injector injector) throws AmbariException {
- injector.injectMembers(this);
- this.serviceEntity = serviceEntity;
+ ServiceImpl(@Assisted Cluster cluster, @Assisted ClusterServiceEntity serviceEntity,
+ ClusterDAO clusterDAO, ClusterServiceDAO clusterServiceDAO,
+ ServiceDesiredStateDAO serviceDesiredStateDAO,
+ ServiceComponentFactory serviceComponentFactory, StackDAO stackDAO,
+ AmbariMetaInfo ambariMetaInfo, AmbariEventPublisher eventPublisher)
+ throws AmbariException {
this.cluster = cluster;
-
- //TODO check for null states?
- serviceDesiredStateEntity = serviceEntity.getServiceDesiredStateEntity();
+ this.clusterDAO = clusterDAO;
+ this.clusterServiceDAO = clusterServiceDAO;
+ this.serviceDesiredStateDAO = serviceDesiredStateDAO;
+ this.serviceComponentFactory = serviceComponentFactory;
+ this.stackDAO = stackDAO;
+ this.eventPublisher = eventPublisher;
+ serviceName = serviceEntity.getServiceName();
+
+ ServiceDesiredStateEntity serviceDesiredStateEntity = serviceEntity.getServiceDesiredStateEntity();
serviceDesiredStateEntityPK = getServiceDesiredStateEntityPK(serviceDesiredStateEntity);
serviceEntityPK = getServiceEntityPK(serviceEntity);
- components = new HashMap<String, ServiceComponent>();
-
if (!serviceEntity.getServiceComponentDesiredStateEntities().isEmpty()) {
for (ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity
: serviceEntity.getServiceComponentDesiredStateEntities()) {
@@ -174,13 +176,11 @@ public class ServiceImpl implements Service {
ServiceInfo sInfo = ambariMetaInfo.getService(stackId.getStackName(),
stackId.getStackVersion(), getName());
isClientOnlyService = sInfo.isClientOnlyService();
-
- persisted = true;
}
@Override
public String getName() {
- return serviceEntity.getServiceName();
+ return serviceName;
}
@Override
@@ -190,12 +190,7 @@ public class ServiceImpl implements Service {
@Override
public Map<String, ServiceComponent> getServiceComponents() {
- readWriteLock.readLock().lock();
- try {
- return new HashMap<String, ServiceComponent>(components);
- } finally {
- readWriteLock.readLock().unlock();
- }
+ return new HashMap<String, ServiceComponent>(components);
}
@Override
@@ -217,7 +212,7 @@ public class ServiceImpl implements Service {
+ ", serviceName=" + getName()
+ ", serviceComponentName=" + component.getName());
}
-
+
components.put(component.getName(), component);
}
@@ -233,54 +228,40 @@ public class ServiceImpl implements Service {
@Override
public ServiceComponent getServiceComponent(String componentName)
throws AmbariException {
- readWriteLock.readLock().lock();
- try {
- if (!components.containsKey(componentName)) {
- throw new ServiceComponentNotFoundException(cluster.getClusterName(),
- getName(), componentName);
- }
- return components.get(componentName);
- } finally {
- readWriteLock.readLock().unlock();
+ ServiceComponent serviceComponent = components.get(componentName);
+ if (null == serviceComponent) {
+ throw new ServiceComponentNotFoundException(cluster.getClusterName(),
+ getName(), componentName);
}
+
+ return serviceComponent;
}
@Override
public State getDesiredState() {
- readWriteLock.readLock().lock();
- try {
- return getServiceDesiredStateEntity().getDesiredState();
- } finally {
- readWriteLock.readLock().unlock();
- }
+ ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+ return serviceDesiredStateEntity.getDesiredState();
}
@Override
public void setDesiredState(State state) {
- readWriteLock.writeLock().lock();
- try {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Setting DesiredState of Service" + ", clusterName="
- + cluster.getClusterName() + ", clusterId="
- + cluster.getClusterId() + ", serviceName=" + getName()
- + ", oldDesiredState=" + getDesiredState() + ", newDesiredState="
- + state + ", persisted = " + isPersisted());
- }
- getServiceDesiredStateEntity().setDesiredState(state);
- saveIfPersisted();
- } finally {
- readWriteLock.writeLock().unlock();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Setting DesiredState of Service" + ", clusterName="
+ + cluster.getClusterName() + ", clusterId="
+ + cluster.getClusterId() + ", serviceName=" + getName()
+ + ", oldDesiredState=" + getDesiredState() + ", newDesiredState="
+ + state);
}
+
+ ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+ serviceDesiredStateEntity.setDesiredState(state);
+ serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
}
@Override
public SecurityState getSecurityState() {
- readWriteLock.readLock().lock();
- try {
- return getServiceDesiredStateEntity().getSecurityState();
- } finally {
- readWriteLock.readLock().unlock();
- }
+ ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+ return serviceDesiredStateEntity.getSecurityState();
}
@Override
@@ -289,70 +270,52 @@ public class ServiceImpl implements Service {
throw new AmbariException("The security state must be an endpoint state");
}
- readWriteLock.writeLock().lock();
- try {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Setting DesiredSecurityState of Service" + ", clusterName="
- + cluster.getClusterName() + ", clusterId="
- + cluster.getClusterId() + ", serviceName=" + getName()
- + ", oldDesiredSecurityState=" + getSecurityState()
- + ", newDesiredSecurityState=" + securityState);
- }
- getServiceDesiredStateEntity().setSecurityState(securityState);
- saveIfPersisted();
- } finally {
- readWriteLock.writeLock().unlock();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Setting DesiredSecurityState of Service" + ", clusterName="
+ + cluster.getClusterName() + ", clusterId="
+ + cluster.getClusterId() + ", serviceName=" + getName()
+ + ", oldDesiredSecurityState=" + getSecurityState()
+ + ", newDesiredSecurityState=" + securityState);
}
+ ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+ serviceDesiredStateEntity.setSecurityState(securityState);
+ serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
}
@Override
public StackId getDesiredStackVersion() {
- readWriteLock.readLock().lock();
- try {
- StackEntity desiredStackEntity = getServiceDesiredStateEntity().getDesiredStack();
- if( null != desiredStackEntity ) {
- return new StackId(desiredStackEntity);
- } else {
- return null;
- }
- } finally {
- readWriteLock.readLock().unlock();
+ ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+ StackEntity desiredStackEntity = serviceDesiredStateEntity.getDesiredStack();
+ if( null != desiredStackEntity ) {
+ return new StackId(desiredStackEntity);
+ } else {
+ return null;
}
}
@Override
public void setDesiredStackVersion(StackId stack) {
- readWriteLock.writeLock().lock();
- try {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Setting DesiredStackVersion of Service" + ", clusterName="
- + cluster.getClusterName() + ", clusterId="
- + cluster.getClusterId() + ", serviceName=" + getName()
- + ", oldDesiredStackVersion=" + getDesiredStackVersion()
- + ", newDesiredStackVersion=" + stack);
- }
-
- StackEntity stackEntity = stackDAO.find(stack.getStackName(), stack.getStackVersion());
- getServiceDesiredStateEntity().setDesiredStack(stackEntity);
- saveIfPersisted();
- } finally {
- readWriteLock.writeLock().unlock();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Setting DesiredStackVersion of Service" + ", clusterName="
+ + cluster.getClusterName() + ", clusterId="
+ + cluster.getClusterId() + ", serviceName=" + getName()
+ + ", oldDesiredStackVersion=" + getDesiredStackVersion()
+ + ", newDesiredStackVersion=" + stack);
}
+
+ StackEntity stackEntity = stackDAO.find(stack.getStackName(), stack.getStackVersion());
+ ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+ serviceDesiredStateEntity.setDesiredStack(stackEntity);
+ serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
}
@Override
public ServiceResponse convertToResponse() {
- readWriteLock.readLock().lock();
- try {
- ServiceResponse r = new ServiceResponse(cluster.getClusterId(),
- cluster.getClusterName(), getName(),
- getDesiredStackVersion().getStackId(), getDesiredState().toString());
+ ServiceResponse r = new ServiceResponse(cluster.getClusterId(), cluster.getClusterName(),
+ getName(), getDesiredStackVersion().getStackId(), getDesiredState().toString());
- r.setMaintenanceState(getMaintenanceState().name());
- return r;
- } finally {
- readWriteLock.readLock().unlock();
- }
+ r.setMaintenanceState(getMaintenanceState().name());
+ return r;
}
@Override
@@ -362,138 +325,79 @@ public class ServiceImpl implements Service {
@Override
public void debugDump(StringBuilder sb) {
- readWriteLock.readLock().lock();
- try {
- sb.append("Service={ serviceName=" + getName() + ", clusterName="
- + cluster.getClusterName() + ", clusterId=" + cluster.getClusterId()
- + ", desiredStackVersion=" + getDesiredStackVersion()
- + ", desiredState=" + getDesiredState().toString()
- + ", components=[ ");
- boolean first = true;
- for (ServiceComponent sc : components.values()) {
- if (!first) {
- sb.append(" , ");
- }
- first = false;
- sb.append("\n ");
- sc.debugDump(sb);
- sb.append(" ");
+ sb.append("Service={ serviceName=" + getName() + ", clusterName=" + cluster.getClusterName()
+ + ", clusterId=" + cluster.getClusterId() + ", desiredStackVersion="
+ + getDesiredStackVersion() + ", desiredState=" + getDesiredState().toString()
+ + ", components=[ ");
+ boolean first = true;
+ for (ServiceComponent sc : components.values()) {
+ if (!first) {
+ sb.append(" , ");
}
- sb.append(" ] }");
- } finally {
- readWriteLock.readLock().unlock();
+ first = false;
+ sb.append("\n ");
+ sc.debugDump(sb);
+ sb.append(" ");
}
+ sb.append(" ] }");
}
/**
- * {@inheritDoc}
- */
- @Override
- public boolean isPersisted() {
- // a lock around this internal state variable is not required since we
- // have appropriate locks in the persist() method and this member is
- // only ever false under the condition that the object is new
- return persisted;
- }
-
- /**
- * {@inheritDoc}
- * <p/>
- * This method uses Java locks and then delegates to internal methods which
- * perform the JPA merges inside of a transaction. Because of this, a
- * transaction is not necessary before this calling this method.
+ *
*/
- @Override
@Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
- public void persist() {
- readWriteLock.writeLock().lock();
- try {
- if (!persisted) {
- persistEntities();
- refresh();
-
- persisted = true;
+ private void persist(ClusterServiceEntity serviceEntity) {
+ persistEntities(serviceEntity);
+ refresh();
- // publish the service installed event
- StackId stackId = cluster.getDesiredStackVersion();
- cluster.addService(this);
+ // publish the service installed event
+ StackId stackId = cluster.getDesiredStackVersion();
+ cluster.addService(this);
- ServiceInstalledEvent event = new ServiceInstalledEvent(
- getClusterId(), stackId.getStackName(),
- stackId.getStackVersion(), getName());
+ ServiceInstalledEvent event = new ServiceInstalledEvent(getClusterId(), stackId.getStackName(),
+ stackId.getStackVersion(), getName());
- eventPublisher.publish(event);
- } else {
- saveIfPersisted();
- }
- } finally {
- readWriteLock.writeLock().unlock();
- }
+ eventPublisher.publish(event);
}
@Transactional
- protected void persistEntities() {
+ private void persistEntities(ClusterServiceEntity serviceEntity) {
long clusterId = cluster.getClusterId();
-
ClusterEntity clusterEntity = clusterDAO.findById(clusterId);
serviceEntity.setClusterEntity(clusterEntity);
clusterServiceDAO.create(serviceEntity);
- serviceDesiredStateDAO.create(serviceDesiredStateEntity);
clusterEntity.getClusterServiceEntities().add(serviceEntity);
clusterDAO.merge(clusterEntity);
clusterServiceDAO.merge(serviceEntity);
- serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
- }
-
- @Transactional
- void saveIfPersisted() {
- if (isPersisted()) {
- clusterServiceDAO.merge(serviceEntity);
- serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
- }
}
- @Override
@Transactional
public void refresh() {
- readWriteLock.writeLock().lock();
- try {
- if (isPersisted()) {
- ClusterServiceEntityPK pk = new ClusterServiceEntityPK();
- pk.setClusterId(getClusterId());
- pk.setServiceName(getName());
- serviceEntity = clusterServiceDAO.findByPK(pk);
- serviceDesiredStateEntity = serviceEntity.getServiceDesiredStateEntity();
- clusterServiceDAO.refresh(serviceEntity);
- serviceDesiredStateDAO.refresh(serviceDesiredStateEntity);
- }
- } finally {
- readWriteLock.writeLock().unlock();
- }
+ ClusterServiceEntityPK pk = new ClusterServiceEntityPK();
+ pk.setClusterId(getClusterId());
+ pk.setServiceName(getName());
+ ClusterServiceEntity serviceEntity = getServiceEntity();
+ clusterServiceDAO.refresh(serviceEntity);
+ serviceDesiredStateDAO.refresh(serviceEntity.getServiceDesiredStateEntity());
}
@Override
@Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public boolean canBeRemoved() {
- readWriteLock.readLock().lock();
- try {
- //
- // A service can be deleted if all it's components
- // can be removed, irrespective of the state of
- // the service itself.
- //
- for (ServiceComponent sc : components.values()) {
- if (!sc.canBeRemoved()) {
- LOG.warn("Found non removable component when trying to delete service" + ", clusterName="
- + cluster.getClusterName() + ", serviceName=" + getName() + ", componentName="
- + sc.getName());
- return false;
- }
+ //
+ // A service can be deleted if all it's components
+ // can be removed, irrespective of the state of
+ // the service itself.
+ //
+ for (ServiceComponent sc : components.values()) {
+ if (!sc.canBeRemoved()) {
+ LOG.warn("Found non removable component when trying to delete service" + ", clusterName="
+ + cluster.getClusterName() + ", serviceName=" + getName() + ", componentName="
+ + sc.getName());
+ return false;
}
- return true;
- } finally {
- readWriteLock.readLock().unlock();
}
+ return true;
}
@Transactional
@@ -537,7 +441,7 @@ public class ServiceImpl implements Service {
@Transactional
@Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void deleteAllComponents() throws AmbariException {
- readWriteLock.writeLock().lock();
+ lock.lock();
try {
LOG.info("Deleting all components for service" + ", clusterName=" + cluster.getClusterName()
+ ", serviceName=" + getName());
@@ -556,7 +460,7 @@ public class ServiceImpl implements Service {
components.clear();
} finally {
- readWriteLock.writeLock().unlock();
+ lock.unlock();
}
}
@@ -564,7 +468,7 @@ public class ServiceImpl implements Service {
@Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void deleteServiceComponent(String componentName)
throws AmbariException {
- readWriteLock.writeLock().lock();
+ lock.lock();
try {
ServiceComponent component = getServiceComponent(componentName);
LOG.info("Deleting servicecomponent for cluster" + ", clusterName=" + cluster.getClusterName()
@@ -580,7 +484,7 @@ public class ServiceImpl implements Service {
component.delete();
components.remove(componentName);
} finally {
- readWriteLock.writeLock().unlock();
+ lock.unlock();
}
}
@@ -593,26 +497,18 @@ public class ServiceImpl implements Service {
@Transactional
@Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
public void delete() throws AmbariException {
- readWriteLock.writeLock().lock();
- try {
- deleteAllComponents();
- deleteAllServiceConfigs();
+ deleteAllComponents();
+ deleteAllServiceConfigs();
- if (persisted) {
- removeEntities();
- persisted = false;
+ removeEntities();
- // publish the service removed event
- StackId stackId = cluster.getDesiredStackVersion();
+ // publish the service removed event
+ StackId stackId = cluster.getDesiredStackVersion();
- ServiceRemovedEvent event = new ServiceRemovedEvent(getClusterId(), stackId.getStackName(),
- stackId.getStackVersion(), getName());
+ ServiceRemovedEvent event = new ServiceRemovedEvent(getClusterId(), stackId.getStackName(),
+ stackId.getStackVersion(), getName());
- eventPublisher.publish(event);
- }
- } finally {
- readWriteLock.writeLock().unlock();
- }
+ eventPublisher.publish(event);
}
@Transactional
@@ -628,17 +524,13 @@ public class ServiceImpl implements Service {
@Override
public void setMaintenanceState(MaintenanceState state) {
- readWriteLock.writeLock().lock();
- try {
- getServiceDesiredStateEntity().setMaintenanceState(state);
- saveIfPersisted();
+ ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+ serviceDesiredStateEntity.setMaintenanceState(state);
+ serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
- // broadcast the maintenance mode change
- MaintenanceModeEvent event = new MaintenanceModeEvent(state, this);
- eventPublisher.publish(event);
- } finally {
- readWriteLock.writeLock().unlock();
- }
+ // broadcast the maintenance mode change
+ MaintenanceModeEvent event = new MaintenanceModeEvent(state, this);
+ eventPublisher.publish(event);
}
@Override
@@ -647,10 +539,7 @@ public class ServiceImpl implements Service {
}
private ClusterServiceEntity getServiceEntity() {
- if (isPersisted()) {
- serviceEntity = clusterServiceDAO.findByPK(serviceEntityPK);
- }
- return serviceEntity;
+ return clusterServiceDAO.findByPK(serviceEntityPK);
}
private ClusterServiceEntityPK getServiceEntityPK(ClusterServiceEntity serviceEntity) {
@@ -669,9 +558,6 @@ public class ServiceImpl implements Service {
// Refresh the cached reference on setters
private ServiceDesiredStateEntity getServiceDesiredStateEntity() {
- if (isPersisted()) {
- serviceDesiredStateEntity = serviceDesiredStateDAO.findByPK(serviceDesiredStateEntityPK);
- }
- return serviceDesiredStateEntity;
+ return serviceDesiredStateDAO.findByPK(serviceDesiredStateEntityPK);
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
index aff3a3b..bed33d9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
@@ -464,8 +464,6 @@ public class ClustersImpl implements Clusters {
host.setHostAttributes(attributes);
}
- host.refresh();
-
Set<String> hostClusterNames = hostClusters.get(hostname);
for (String clusterName : hostClusterNames) {
if (clusterName != null && !clusterName.isEmpty()) {
@@ -554,7 +552,6 @@ public class ClustersImpl implements Clusters {
clusterHostMap.get(clusterName).add(host);
cluster.refresh();
- host.refresh();
}
/**
@@ -673,7 +670,6 @@ public class ClustersImpl implements Clusters {
unmapHostFromClusters(host, Sets.newHashSet(cluster));
cluster.refresh();
- host.refresh();
}
@Transactional
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
index ec8873a..286b5ca 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
@@ -108,7 +108,6 @@ public class HostImpl implements Host {
new TypeToken<Map<Long, MaintenanceState>>() {}.getType();
ReadWriteLock rwLock;
- private final Lock readLock;
private final Lock writeLock;
@Inject
@@ -257,7 +256,6 @@ public class HostImpl implements Host {
stateMachine = stateMachineFactory.make(this);
rwLock = new ReentrantReadWriteLock();
- readLock = rwLock.readLock();
writeLock = rwLock.writeLock();
HostStateEntity hostStateEntity = hostEntity.getHostStateEntity();
@@ -274,8 +272,6 @@ public class HostImpl implements Host {
if (null == hostEntity.getHostId()) {
persistEntities(hostEntity);
- refresh();
-
for (ClusterEntity clusterEntity : hostEntity.getClusterEntities()) {
try {
clusters.getClusterById(clusterEntity.getClusterId()).refresh();
@@ -963,17 +959,6 @@ public class HostImpl implements Host {
@Override
@Transactional
- public void refresh() {
- writeLock.lock();
- try {
- getHostEntity();
- } finally {
- writeLock.unlock();
- }
- }
-
- @Override
- @Transactional
public boolean addDesiredConfig(long clusterId, boolean selected, String user, Config config) {
if (null == user) {
throw new NullPointerException("User must be specified.");
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index 7e345e5..a575456 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -1563,12 +1563,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
try {
if (persisted) {
removeEntities();
-
- // host must be re-loaded from db to refresh the cached JPA HostEntity
- // that references HostComponentDesiredStateEntity
- // and HostComponentStateEntity JPA entities
- host.refresh();
-
persisted = false;
fireRemovalEvent = true;
}
@@ -1604,14 +1598,16 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
protected void removeEntities() {
HostComponentStateEntity stateEntity = getStateEntity();
if (stateEntity != null) {
- // make sure that the state entities are removed from the associated (detached) host entity
- // Also refresh before delete
- stateEntity.getHostEntity().removeHostComponentStateEntity(stateEntity);
+ HostEntity hostEntity = stateEntity.getHostEntity();
HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
- desiredStateEntity.getHostEntity().removeHostComponentDesiredStateEntity(desiredStateEntity);
- hostComponentDesiredStateDAO.remove(desiredStateEntity);
+ // Make sure that the state entity is removed from its host entity
+ hostEntity.removeHostComponentStateEntity(stateEntity);
+ hostEntity.removeHostComponentDesiredStateEntity(desiredStateEntity);
+ hostDAO.merge(hostEntity);
+
+ hostComponentDesiredStateDAO.remove(desiredStateEntity);
hostComponentStateDAO.remove(stateEntity);
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java
index 17f1447..d732edf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java
@@ -86,6 +86,7 @@ public class RetryHelper {
public static void invalidateAffectedClusters() {
for (Cluster cluster : affectedClusters.get()) {
s_clusters.invalidate(cluster);
+ affectedClusters.get().remove(cluster);
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
index dd93374..64305ff 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
@@ -159,7 +159,6 @@ public class HeartbeatProcessorTest {
public void testHeartbeatWithConfigs() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).persist();
@@ -229,7 +228,6 @@ public class HeartbeatProcessorTest {
public void testRestartRequiredAfterInstallClient() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(HDFS_CLIENT).persist();
hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
@@ -295,7 +293,6 @@ public class HeartbeatProcessorTest {
public void testHeartbeatCustomCommandWithConfigs() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).persist();
@@ -380,7 +377,6 @@ public class HeartbeatProcessorTest {
public void testHeartbeatCustomStartStop() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).persist();
@@ -465,7 +461,6 @@ public class HeartbeatProcessorTest {
public void testStatusHeartbeat() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).persist();
@@ -595,7 +590,6 @@ public class HeartbeatProcessorTest {
throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
@@ -715,7 +709,6 @@ public class HeartbeatProcessorTest {
public void testUpgradeSpecificHandling() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
@@ -810,7 +803,6 @@ public class HeartbeatProcessorTest {
public void testCommandStatusProcesses() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
@@ -892,7 +884,6 @@ public class HeartbeatProcessorTest {
public void testComponentUpgradeCompleteReport() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).persist();
@@ -977,7 +968,6 @@ public class HeartbeatProcessorTest {
public void testComponentUpgradeFailReport() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).persist();
@@ -1098,7 +1088,6 @@ public class HeartbeatProcessorTest {
public void testComponentUpgradeInProgressReport() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).persist();
@@ -1305,7 +1294,6 @@ public class HeartbeatProcessorTest {
public void testComponentInProgressStatusSafeAfterStatusReport() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).
addServiceComponentHost(DummyHostname1).persist();
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index 0f48cf6..6205f59 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@ -221,7 +221,6 @@ public class TestHeartbeatHandler {
public void testStatusHeartbeatWithAnnotation() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.addServiceComponent(NAMENODE).persist();
hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
@@ -273,7 +272,6 @@ public class TestHeartbeatHandler {
public void testLiveStatusUpdateAfterStopFailed() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).
addServiceComponentHost(DummyHostname1).persist();
@@ -385,7 +383,6 @@ public class TestHeartbeatHandler {
injector);
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
hdfs.getServiceComponent(DATANODE).persist();
@@ -449,7 +446,6 @@ public class TestHeartbeatHandler {
injector);
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
/**
* Add three service components enabled for auto start.
@@ -784,7 +780,6 @@ public class TestHeartbeatHandler {
public void testTaskInProgressHandling() throws Exception, InvalidStateTransitionException {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).persist();
@@ -842,7 +837,6 @@ public class TestHeartbeatHandler {
public void testOPFailedEventForAbortedTask() throws Exception, InvalidStateTransitionException {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).persist();
@@ -917,7 +911,6 @@ public class TestHeartbeatHandler {
public void testStatusHeartbeatWithVersion() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).persist();
@@ -995,7 +988,6 @@ public class TestHeartbeatHandler {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Host hostObject = clusters.getHost(DummyHostname1);
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).persist();
@@ -1077,7 +1069,6 @@ public class TestHeartbeatHandler {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Host hostObject = clusters.getHost(DummyHostname1);
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.addServiceComponent(NAMENODE).persist();
@@ -1412,7 +1403,6 @@ public class TestHeartbeatHandler {
public void testCommandStatusProcesses_empty() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).persist();
hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
index 503c8e5..e6a3ee6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
@@ -168,7 +168,6 @@ public class TestHeartbeatMonitor {
clusters.mapHostsToCluster(hostNames, clusterName);
Service hdfs = cluster.addService(serviceName);
- hdfs.persist();
hdfs.addServiceComponent(Role.DATANODE.name()).persist();
hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
@@ -261,7 +260,6 @@ public class TestHeartbeatMonitor {
clusters.mapHostsToCluster(hostNames, clusterName);
Service hdfs = cluster.addService(serviceName);
- hdfs.persist();
hdfs.addServiceComponent(Role.DATANODE.name()).persist();
hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost
(hostname1).persist();
@@ -370,7 +368,6 @@ public class TestHeartbeatMonitor {
clusters.mapHostsToCluster(hostNames, clusterName);
Service hdfs = cluster.addService(serviceName);
- hdfs.persist();
hdfs.addServiceComponent(Role.DATANODE.name()).persist();
hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
@@ -453,7 +450,6 @@ public class TestHeartbeatMonitor {
clusters.mapHostsToCluster(hostNames, clusterName);
Service hdfs = cluster.addService(serviceName);
- hdfs.persist();
hdfs.addServiceComponent(Role.DATANODE.name()).persist();
hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
@@ -573,7 +569,6 @@ public class TestHeartbeatMonitor {
clusters.mapHostsToCluster(hostNames, clusterName);
Service hdfs = cluster.addService(serviceName);
- hdfs.persist();
hdfs.addServiceComponent(Role.DATANODE.name()).persist();
hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 9f79a32..4641dfc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -87,6 +87,7 @@ import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptorFactory;
import org.apache.ambari.server.state.stack.Metric;
import org.apache.ambari.server.state.stack.MetricDefinition;
import org.apache.ambari.server.state.stack.OsFamily;
+import org.apache.ambari.server.utils.EventBusSynchronizer;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.StringUtils;
import org.easymock.Capture;
@@ -1899,6 +1900,8 @@ public class AmbariMetaInfoTest {
Injector injector = Guice.createInjector(Modules.override(
new InMemoryDefaultTestModule()).with(new MockModule()));
+ EventBusSynchronizer.synchronizeAmbariEventPublisher(injector);
+
injector.getInstance(GuiceJpaInitializer.class);
injector.getInstance(EntityManager.class);
long clusterId = injector.getInstance(OrmTestHelper.class).createCluster(
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
index 50f5abe..351d473 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
@@ -135,7 +135,6 @@ public class RecoveryConfigHelperTest {
throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
hdfs.getServiceComponent(DATANODE).persist();
@@ -171,7 +170,6 @@ public class RecoveryConfigHelperTest {
throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
hdfs.getServiceComponent(DATANODE).persist();
@@ -209,7 +207,6 @@ public class RecoveryConfigHelperTest {
throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
hdfs.getServiceComponent(DATANODE).persist();
@@ -250,7 +247,6 @@ public class RecoveryConfigHelperTest {
throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
hdfs.getServiceComponent(DATANODE).persist();
@@ -287,7 +283,6 @@ public class RecoveryConfigHelperTest {
throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
hdfs.getServiceComponent(DATANODE).persist();
@@ -330,7 +325,6 @@ public class RecoveryConfigHelperTest {
// Add HDFS service with DATANODE component to the cluster
Service hdfs = cluster.addService(HDFS);
- hdfs.persist();
hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
hdfs.getServiceComponent(DATANODE).persist();
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index a3521fd..d61a3e7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -1006,8 +1006,6 @@ public class AmbariManagementControllerTest {
Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
c1.addService(s1);
c1.addService(s2);
- s1.persist();
- s2.persist();
set1.clear();
ServiceComponentRequest valid1 =
@@ -1311,8 +1309,6 @@ public class AmbariManagementControllerTest {
Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
c1.addService(s1);
c1.addService(s2);
- s1.persist();
- s2.persist();
Set<ServiceComponentRequest> set1 = new HashSet<ServiceComponentRequest>();
ServiceComponentRequest valid1 =
@@ -1622,13 +1618,10 @@ public class AmbariManagementControllerTest {
Service s1 = serviceFactory.createNew(foo, "HDFS");
foo.addService(s1);
- s1.persist();
Service s2 = serviceFactory.createNew(c1, "HDFS");
c1.addService(s2);
- s2.persist();
Service s3 = serviceFactory.createNew(c2, "HDFS");
c2.addService(s3);
- s3.persist();
try {
@@ -2268,8 +2261,6 @@ public class AmbariManagementControllerTest {
s1.setDesiredStackVersion(new StackId("HDP-0.1"));
s1.setDesiredState(State.INSTALLED);
- s1.persist();
-
ServiceRequest r = new ServiceRequest(cluster1, null, null);
Set<ServiceResponse> resp = ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
@@ -2314,12 +2305,6 @@ public class AmbariManagementControllerTest {
s2.setDesiredState(State.INSTALLED);
s4.setDesiredState(State.INSTALLED);
- s1.persist();
- s2.persist();
- s3.persist();
- s4.persist();
- s5.persist();
-
ServiceRequest r = new ServiceRequest(null, null, null);
Set<ServiceResponse> resp;
@@ -2378,7 +2363,6 @@ public class AmbariManagementControllerTest {
Service s1 = serviceFactory.createNew(c1, "HDFS");
c1.addService(s1);
s1.setDesiredState(State.INSTALLED);
- s1.persist();
ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
s1.addServiceComponent(sc1);
sc1.persist();
@@ -2430,12 +2414,6 @@ public class AmbariManagementControllerTest {
s2.setDesiredState(State.INSTALLED);
s4.setDesiredState(State.INSTALLED);
- s1.persist();
- s2.persist();
- s3.persist();
- s4.persist();
- s5.persist();
-
ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
ServiceComponent sc2 = serviceComponentFactory.createNew(s1, "NAMENODE");
ServiceComponent sc3 = serviceComponentFactory.createNew(s3,
@@ -2548,7 +2526,6 @@ public class AmbariManagementControllerTest {
Cluster c1 = setupClusterWithHosts(cluster1, "HDP-0.1", Lists.newArrayList(host1), "centos5");
Service s1 = serviceFactory.createNew(c1, "HDFS");
c1.addService(s1);
- s1.persist();
ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
s1.addServiceComponent(sc1);
sc1.setDesiredState(State.UNINSTALLED);
@@ -2964,10 +2941,6 @@ public class AmbariManagementControllerTest {
s1.setDesiredState(State.INSTALLED);
s2.setDesiredState(State.INSTALLED);
- s1.persist();
- s2.persist();
- s3.persist();
-
ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
ServiceComponent sc2 = serviceComponentFactory.createNew(s1, "NAMENODE");
ServiceComponent sc3 = serviceComponentFactory.createNew(s3,
@@ -4285,10 +4258,7 @@ public class AmbariManagementControllerTest {
cluster.addConfig(config3);
Service hdfs = cluster.addService("HDFS");
- hdfs.persist();
-
Service mapred = cluster.addService("YARN");
- mapred.persist();
hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
@@ -4454,7 +4424,6 @@ public class AmbariManagementControllerTest {
cluster.addConfig(config2);
Service hdfs = cluster.addService("HDFS");
- hdfs.persist();
hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
@@ -4564,10 +4533,7 @@ public class AmbariManagementControllerTest {
cluster.addDesiredConfig("_test", Collections.singleton(config2));
Service hdfs = cluster.addService("HDFS");
- hdfs.persist();
-
Service hive = cluster.addService("HIVE");
- hive.persist();
hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
@@ -4852,8 +4818,6 @@ public class AmbariManagementControllerTest {
Service hdfs = cluster.addService("HDFS");
Service mapReduce = cluster.addService("MAPREDUCE");
- hdfs.persist();
- mapReduce.persist();
hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
mapReduce.addServiceComponent(Role.MAPREDUCE_CLIENT.name()).persist();
@@ -6532,10 +6496,7 @@ public class AmbariManagementControllerTest {
cluster.addConfig(config2);
Service hdfs = cluster.addService("HDFS");
- hdfs.persist();
-
Service mapred = cluster.addService("YARN");
- mapred.persist();
hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
@@ -6641,10 +6602,7 @@ public class AmbariManagementControllerTest {
cluster.addConfig(config2);
Service hdfs = cluster.addService("HDFS");
- hdfs.persist();
-
Service mapred = cluster.addService("YARN");
- mapred.persist();
hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
@@ -10101,7 +10059,6 @@ public class AmbariManagementControllerTest {
"centos5");
Service hdfs = c1.addService("HDFS");
- hdfs.persist();
createServiceComponent(cluster1, "HDFS", "NAMENODE", State.INIT);
createServiceComponent(cluster1, "HDFS", "DATANODE", State.INIT);
createServiceComponent(cluster1, "HDFS", "HDFS_CLIENT", State.INIT);
http://git-wip-us.apache.org/repos/asf/ambari/blob/0de69e10/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java
index 6a0fa12..e09b9c5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java
@@ -18,32 +18,40 @@
package org.apache.ambari.server.controller.internal;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+
+import java.io.File;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import javax.persistence.EntityManager;
+
import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.actionmanager.ActionDBAccessor;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.checks.AbstractCheckDescriptor;
import org.apache.ambari.server.checks.UpgradeCheckRegistry;
import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.KerberosHelper;
-import org.apache.ambari.server.controller.MaintenanceStateHelper;
-import org.apache.ambari.server.controller.RequestStatusResponse;
import org.apache.ambari.server.controller.spi.Predicate;
import org.apache.ambari.server.controller.spi.Request;
import org.apache.ambari.server.controller.spi.Resource;
import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.spi.SystemException;
import org.apache.ambari.server.controller.utilities.PredicateBuilder;
import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.events.jpa.EntityManagerCacheInvalidationEvent;
import org.apache.ambari.server.orm.DBAccessor;
import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
-import org.apache.ambari.server.scheduler.ExecutionScheduler;
import org.apache.ambari.server.stack.StackManagerFactory;
import org.apache.ambari.server.state.CheckHelper;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceFactory;
import org.apache.ambari.server.state.ServiceInfo;
@@ -56,41 +64,14 @@ import org.apache.ambari.server.state.stack.UpgradePack;
import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig;
import org.apache.ambari.server.state.stack.upgrade.Direction;
import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
-import org.easymock.EasyMock;
import org.junit.Assert;
import org.junit.Test;
-import java.io.File;
-import java.lang.reflect.Field;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import javax.persistence.EntityManager;
-
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Provider;
-import static org.easymock.EasyMock.anyBoolean;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.isNull;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
/**
* PreUpgradeCheckResourceProvider tests.
*/
@@ -126,7 +107,6 @@ public class PreUpgradeCheckResourceProviderTest {
// set expectations
expect(managementController.getClusters()).andReturn(clusters).anyTimes();
expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
- expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
expect(cluster.getServices()).andReturn(allServiceMap).anyTimes();
[27/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aa29f56c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aa29f56c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aa29f56c
Branch: refs/heads/trunk
Commit: aa29f56cb8d5024602717716f6d218a695f92724
Parents: 5dff9ac 41c49e1
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Oct 17 10:16:43 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Mon Oct 17 10:16:43 2016 -0400
----------------------------------------------------------------------
.../4.0.0.2.0/package/scripts/service_check.py | 9 +-
.../scripts/shared_initialization.py | 2 +-
.../stacks/HDP/2.6/services/YARN/metainfo.xml | 4 +
.../BlueprintConfigurationProcessorTest.java | 5 ++
ambari-web/app/styles/alerts.less | 14 ---
ambari-web/app/styles/application.less | 40 +--------
ambari-web/app/styles/config_history_flow.less | 3 -
ambari-web/app/styles/modal_popups.less | 5 +-
.../app/styles/theme/bootstrap-ambari.css | 38 ++++-----
.../templates/common/configs/overrideWindow.hbs | 34 ++++----
.../common/configs/propertyDependence.hbs | 2 +-
.../config_recommendation_popup.hbs | 2 +-
.../modal_popups/dependent_configs_list.hbs | 4 +-
.../modal_popups/hosts_table_list_popup.hbs | 16 ++--
ambari-web/app/templates/common/settings.hbs | 2 +-
ambari-web/app/templates/experimental.hbs | 8 +-
.../templates/main/admin/serviceAccounts.hbs | 2 +-
.../main/admin/stack_upgrade/services.hbs | 2 +-
.../upgrade_configs_merge_table.hbs | 4 +-
.../admin/stack_upgrade/upgrade_history.hbs | 2 +-
ambari-web/app/templates/main/alerts.hbs | 2 +-
.../alerts/add_definition_to_group_popup.hbs | 52 ++++++------
.../main/alerts/definition_details.hbs | 4 +-
.../templates/main/dashboard/config_history.hbs | 2 +-
ambari-web/app/templates/main/host.hbs | 2 +-
.../app/templates/main/host/addHost/step4.hbs | 2 +-
.../app/templates/main/host/host_alerts.hbs | 4 +-
ambari-web/app/templates/main/host/logs.hbs | 8 +-
.../app/templates/main/host/stack_versions.hbs | 2 +-
.../main/service/info/configs_save_popup.hbs | 20 +++--
ambari-web/app/templates/main/views.hbs | 4 +-
ambari-web/app/templates/wizard/step1.hbs | 16 ++--
ambari-web/app/templates/wizard/step3.hbs | 2 +-
ambari-web/app/templates/wizard/step4.hbs | 4 +-
ambari-web/app/templates/wizard/step6.hbs | 2 +-
.../wizard/step9/step9_install_host_popup.hbs | 2 +-
.../app/views/main/alert_definitions_view.js | 4 +-
.../MICROSOFT_R/8.0.5/kerberos.json | 15 ----
.../MICROSOFT_R/8.0.5/metainfo.xml | 53 ------------
.../package/files/microsoft_r_serviceCheck.r | 28 ------
.../8.0.5/package/scripts/microsoft_r.py | 38 ---------
.../MICROSOFT_R/8.0.5/package/scripts/params.py | 31 -------
.../8.0.5/package/scripts/params_linux.py | 79 -----------------
.../8.0.5/package/scripts/params_windows.py | 26 ------
.../8.0.5/package/scripts/service_check.py | 89 --------------------
.../MICROSOFT_R/8.0.5/role_command_order.json | 6 --
.../MICROSOFT_R/8.0.5/service_advisor.py | 73 ----------------
.../MICROSOFT_R_SERVER/8.0.5/kerberos.json | 15 ++++
.../MICROSOFT_R_SERVER/8.0.5/metainfo.xml | 53 ++++++++++++
.../files/microsoft_r_server_serviceCheck.r | 28 ++++++
.../8.0.5/package/scripts/microsoft_r_server.py | 38 +++++++++
.../8.0.5/package/scripts/params.py | 31 +++++++
.../8.0.5/package/scripts/params_linux.py | 79 +++++++++++++++++
.../8.0.5/package/scripts/params_windows.py | 26 ++++++
.../8.0.5/package/scripts/service_check.py | 89 ++++++++++++++++++++
.../8.0.5/role_command_order.json | 6 ++
.../MICROSOFT_R_SERVER/8.0.5/service_advisor.py | 73 ++++++++++++++++
.../MICROSOFT_R/8.0.5/metainfo.xml | 47 -----------
.../MICROSOFT_R/8.0.5/repos/repoinfo.xml | 40 ---------
.../MICROSOFT_R_SERVER/8.0.5/metainfo.xml | 47 +++++++++++
.../MICROSOFT_R_SERVER/8.0.5/repos/repoinfo.xml | 40 +++++++++
.../src/main/resources/mpack.json | 10 +--
62 files changed, 684 insertions(+), 706 deletions(-)
----------------------------------------------------------------------
[03/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/817aed4b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/817aed4b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/817aed4b
Branch: refs/heads/trunk
Commit: 817aed4bdd800801cc9c21cc265ce577f4b16476
Parents: f0da4fa 5fa2d9b
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Thu Sep 29 13:21:14 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Thu Sep 29 13:21:14 2016 -0400
----------------------------------------------------------------------
ambari-server/pom.xml | 23 +-
.../checks/ServiceCheckValidityCheck.java | 16 +-
.../internal/HostResourceProvider.java | 130 ++++--
.../server/controller/internal/RequestImpl.java | 2 +-
.../python/ambari_server/dbConfiguration.py | 52 ++-
.../AMBARI_METRICS/0.1.0/metainfo.xml | 17 +-
.../stacks/HDP/2.5/services/ATLAS/metainfo.xml | 2 +-
.../stacks/HDP/2.5/services/SPARK/kerberos.json | 2 +-
.../stacks/HDP/2.5/services/YARN/metainfo.xml | 5 +
.../main/resources/stacks/HDP/2.6/metainfo.xml | 23 +
.../resources/stacks/HDP/2.6/repos/repoinfo.xml | 132 ++++++
.../HDP/2.6/services/ACCUMULO/metainfo.xml | 26 ++
.../stacks/HDP/2.6/services/ATLAS/metainfo.xml | 26 ++
.../stacks/HDP/2.6/services/FALCON/metainfo.xml | 26 ++
.../stacks/HDP/2.6/services/FLUME/metainfo.xml | 26 ++
.../stacks/HDP/2.6/services/HBASE/metainfo.xml | 26 ++
.../stacks/HDP/2.6/services/HDFS/metainfo.xml | 26 ++
.../stacks/HDP/2.6/services/HIVE/metainfo.xml | 26 ++
.../stacks/HDP/2.6/services/KAFKA/metainfo.xml | 26 ++
.../HDP/2.6/services/KERBEROS/metainfo.xml | 25 ++
.../stacks/HDP/2.6/services/KNOX/metainfo.xml | 26 ++
.../stacks/HDP/2.6/services/MAHOUT/metainfo.xml | 26 ++
.../stacks/HDP/2.6/services/OOZIE/metainfo.xml | 25 ++
.../stacks/HDP/2.6/services/PIG/metainfo.xml | 26 ++
.../stacks/HDP/2.6/services/RANGER/metainfo.xml | 29 ++
.../HDP/2.6/services/RANGER_KMS/metainfo.xml | 29 ++
.../stacks/HDP/2.6/services/SLIDER/metainfo.xml | 26 ++
.../stacks/HDP/2.6/services/SPARK/metainfo.xml | 29 ++
.../stacks/HDP/2.6/services/SPARK2/metainfo.xml | 29 ++
.../stacks/HDP/2.6/services/SQOOP/metainfo.xml | 26 ++
.../stacks/HDP/2.6/services/STORM/metainfo.xml | 27 ++
.../stacks/HDP/2.6/services/TEZ/metainfo.xml | 26 ++
.../stacks/HDP/2.6/services/YARN/metainfo.xml | 27 ++
.../HDP/2.6/services/ZEPPELIN/metainfo.xml | 27 ++
.../HDP/2.6/services/ZOOKEEPER/metainfo.xml | 26 ++
.../stacks/HDP/2.6/services/stack_advisor.py | 21 +
.../server/agent/TestHeartbeatMonitor.java | 32 +-
.../server/api/services/AmbariMetaInfoTest.java | 14 +-
.../checks/ServiceCheckValidityCheckTest.java | 4 +-
.../server/checks/UpgradeCheckOrderTest.java | 4 +-
.../AmbariManagementControllerTest.java | 106 +++++
.../ConfigGroupResourceProviderTest.java | 229 +++++-----
.../internal/HostResourceProviderTest.java | 15 +-
...ThreadPoolExecutorCompletionServiceTest.java | 2 +-
.../metadata/AgentAlertDefinitionsTest.java | 2 +-
.../apache/ambari/server/orm/OrmTestHelper.java | 12 +-
.../apache/ambari/server/orm/TestOrmImpl.java | 94 ++---
.../server/orm/dao/AlertDispatchDAOTest.java | 419 ++++++-------------
.../security/authorization/TestUsers.java | 369 ++++++----------
.../upgrades/ConfigureActionTest.java | 104 ++---
.../upgrades/UpgradeActionTest.java | 150 ++-----
.../apache/ambari/server/state/ServiceTest.java | 106 ++---
.../svccomphost/ServiceComponentHostTest.java | 167 +++-----
.../server/upgrade/UpgradeCatalog210Test.java | 34 +-
.../src/test/python/TestAmbariServer.py | 107 ++---
55 files changed, 1745 insertions(+), 1287 deletions(-)
----------------------------------------------------------------------
[28/32] ambari git commit: AMBARI-18614 - Remove Unnecessary Locks
Inside Of SCH Business Object Implementations (jonathanhurley)
Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 7b3f671..c06debb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -2523,8 +2523,6 @@ public class AmbariManagementControllerTest {
sch1.setDesiredStackVersion(new StackId("HDP-1.2.0"));
sch1.setStackVersion(new StackId("HDP-0.1"));
- sch1.persist();
-
sch1.updateActualConfigs(new HashMap<String, Map<String,String>>() {{
put("global", new HashMap<String,String>() {{ put("tag", "version1"); }});
}});
@@ -2958,13 +2956,6 @@ public class AmbariManagementControllerTest {
sch4.setDesiredState(State.INSTALLED);
sch5.setDesiredState(State.UNINSTALLED);
- sch1.persist();
- sch2.persist();
- sch3.persist();
- sch4.persist();
- sch5.persist();
- sch6.persist();
-
ServiceComponentHostRequest r =
new ServiceComponentHostRequest(null, null, null, null, null);
@@ -3959,9 +3950,7 @@ public class AmbariManagementControllerTest {
Assert.assertEquals("testServiceComponentHostUpdateStackId", stages.get(0).getRequestContext());
Assert.assertEquals(State.UPGRADING, sch1.getState());
Assert.assertEquals(State.UPGRADING, sch2.getState());
- sch1.refresh();
Assert.assertTrue(sch1.getDesiredStackVersion().compareTo(newStack) == 0);
- sch2.refresh();
Assert.assertTrue(sch2.getDesiredStackVersion().compareTo(newStack) == 0);
for (HostRoleCommand command : stages.get(0).getOrderedHostRoleCommands()) {
ExecutionCommand execCommand = command.getExecutionCommandWrapper().getExecutionCommand();
@@ -4005,11 +3994,8 @@ public class AmbariManagementControllerTest {
Assert.assertEquals(State.UPGRADING, sch1.getState());
Assert.assertEquals(State.UPGRADING, sch2.getState());
Assert.assertEquals(State.UPGRADING, sch3.getState());
- sch1.refresh();
Assert.assertTrue(sch1.getDesiredStackVersion().compareTo(newStack) == 0);
- sch2.refresh();
Assert.assertTrue(sch2.getDesiredStackVersion().compareTo(newStack) == 0);
- sch3.refresh();
Assert.assertTrue(sch3.getDesiredStackVersion().compareTo(newStack) == 0);
for (Stage stage : stages) {
for (HostRoleCommand command : stage.getOrderedHostRoleCommands()) {
@@ -4248,10 +4234,10 @@ public class AmbariManagementControllerTest {
mapred.addServiceComponent(Role.RESOURCEMANAGER.name());
- hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
- hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
- hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1).persist();
- hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host2).persist();
+ hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1);
+ hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1);
+ hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1);
+ hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host2);
String actionDef1 = getUniqueName();
String actionDef2 = getUniqueName();
@@ -4411,9 +4397,9 @@ public class AmbariManagementControllerTest {
hdfs.addServiceComponent(Role.NAMENODE.name());
hdfs.addServiceComponent(Role.DATANODE.name());
- hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
- hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
- hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1).persist();
+ hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1);
+ hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1);
+ hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1);
installService(cluster1, "HDFS", false, false);
@@ -4523,9 +4509,9 @@ public class AmbariManagementControllerTest {
hive.addServiceComponent(Role.HIVE_SERVER.name());
- hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
- hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
- hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1).persist();
+ hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1);
+ hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1);
+ hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1);
Map<String, String> params = new HashMap<String, String>() {{
put("test", "test");
@@ -4804,8 +4790,8 @@ public class AmbariManagementControllerTest {
hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
mapReduce.addServiceComponent(Role.MAPREDUCE_CLIENT.name());
- hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
- mapReduce.getServiceComponent(Role.MAPREDUCE_CLIENT.name()).addServiceComponentHost(host2).persist();
+ hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1);
+ mapReduce.getServiceComponent(Role.MAPREDUCE_CLIENT.name()).addServiceComponentHost(host2);
Map<String, String> params = new HashMap<String, String>() {{
put("test", "test");
@@ -6486,10 +6472,10 @@ public class AmbariManagementControllerTest {
mapred.addServiceComponent(Role.RESOURCEMANAGER.name());
- hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
- hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
- hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1).persist();
- hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host2).persist();
+ hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1);
+ hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1);
+ hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1);
+ hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host2);
String action1 = getUniqueName();
@@ -6592,12 +6578,12 @@ public class AmbariManagementControllerTest {
mapred.addServiceComponent(Role.RESOURCEMANAGER.name());
- hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
- hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
- hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1).persist();
- hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host2).persist();
+ hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1);
+ hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1);
+ hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1);
+ hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host2);
- mapred.getServiceComponent(Role.RESOURCEMANAGER.name()).addServiceComponentHost(host2).persist();
+ mapred.getServiceComponent(Role.RESOURCEMANAGER.name()).addServiceComponentHost(host2);
Map<String, String> params = new HashMap<String, String>() {{
put("test", "test");
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
index 43de27f..79d5569 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
@@ -395,7 +395,5 @@ public class EventsTest {
sch.setState(State.INSTALLED);
sch.setDesiredStackVersion(new StackId("HDP-2.0.6"));
sch.setStackVersion(new StackId("HDP-2.0.6"));
-
- sch.persist();
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
index fffda6c..15e81c5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
@@ -419,7 +419,6 @@ public class OrmTestHelper {
ServiceComponent serviceComponent = service.getServiceComponent(componentName);
ServiceComponentHost serviceComponentHost = serviceComponent.addServiceComponentHost(hostName);
serviceComponentHost.setDesiredState(State.INSTALLED);
- serviceComponentHost.persist();
}
/**
@@ -447,8 +446,6 @@ public class OrmTestHelper {
sch.setDesiredStackVersion(new StackId("HDP-2.0.6"));
sch.setStackVersion(new StackId("HDP-2.0.6"));
- sch.persist();
-
ServiceComponent namenode = componentFactory.createNew(service, "NAMENODE");
service.addServiceComponent(namenode);
@@ -460,8 +457,6 @@ public class OrmTestHelper {
sch.setState(State.INSTALLED);
sch.setDesiredStackVersion(new StackId("HDP-2.0.6"));
sch.setStackVersion(new StackId("HDP-2.0.6"));
-
- sch.persist();
}
/**
@@ -489,8 +484,6 @@ public class OrmTestHelper {
sch.setState(State.INSTALLED);
sch.setDesiredStackVersion(new StackId("HDP-2.0.6"));
sch.setStackVersion(new StackId("HDP-2.0.6"));
-
- sch.persist();
}
/**
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
index 6d19113..0163024 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
@@ -363,7 +363,6 @@ public class ComponentVersionCheckActionTest {
sch.setDesiredStackVersion(cluster.getDesiredStackVersion());
sch.setStackVersion(cluster.getCurrentStackVersion());
- sch.persist();
return sch;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index 60027c2..8f9d4f4 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -1007,8 +1007,6 @@ public class UpgradeActionTest {
sch.setState(State.INSTALLED);
sch.setDesiredStackVersion(cluster.getDesiredStackVersion());
sch.setStackVersion(cluster.getCurrentStackVersion());
-
- sch.persist();
return sch;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
index 59bb393..0cf7f09 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
@@ -216,43 +216,23 @@ public class ServiceComponentTest {
HostEntity hostEntity1 = hostDAO.findByName("h1");
assertNotNull(hostEntity1);
- ServiceComponentHost sch1 =
- serviceComponentHostFactory.createNew(sc, "h1");
- ServiceComponentHost sch2 =
- serviceComponentHostFactory.createNew(sc, "h2");
- ServiceComponentHost failSch =
- serviceComponentHostFactory.createNew(sc, "h2");
-
- Map<String, ServiceComponentHost> compHosts =
- new HashMap<String, ServiceComponentHost>();
- compHosts.put("h1", sch1);
- compHosts.put("h2", sch2);
- compHosts.put("h3", failSch);
+ ServiceComponentHost sch1 = sc.addServiceComponentHost("h1");
+ ServiceComponentHost sch2 = sc.addServiceComponentHost("h2");
try {
- sc.addServiceComponentHosts(compHosts);
+ sc.addServiceComponentHost("h2");
fail("Expected error for dups");
} catch (Exception e) {
// Expected
}
- Assert.assertTrue(sc.getServiceComponentHosts().isEmpty());
-
- compHosts.remove("h3");
- sc.addServiceComponentHosts(compHosts);
Assert.assertEquals(2, sc.getServiceComponentHosts().size());
- sch1.persist();
- sch2.persist();
-
ServiceComponentHost schCheck = sc.getServiceComponentHost("h2");
Assert.assertNotNull(schCheck);
Assert.assertEquals("h2", schCheck.getHostName());
- ServiceComponentHost sch3 =
- serviceComponentHostFactory.createNew(sc, "h3");
- sc.addServiceComponentHost(sch3);
- sch3.persist();
+ sc.addServiceComponentHost("h3");
Assert.assertNotNull(sc.getServiceComponentHost("h3"));
sch1.setDesiredStackVersion(new StackId("HDP-1.2.0"));
@@ -307,7 +287,6 @@ public class ServiceComponentTest {
compHosts.put("h1", sch);
component.addServiceComponentHosts(compHosts);
Assert.assertEquals(1, component.getServiceComponentHosts().size());
- sch.persist();
ServiceComponent sc = service.getServiceComponent(componentName);
Assert.assertNotNull(sc);
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
index fa5491e..4fdcc22 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
@@ -582,7 +582,6 @@ public class ClusterDeadlockTest {
sch.setDesiredStackVersion(stackId);
sch.setStackVersion(stackId);
- sch.persist();
return sch;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
index ca12826..aad074e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
@@ -63,7 +63,7 @@ public class ClusterImplTest {
injector.getInstance(GuiceJpaInitializer.class);
clusters = injector.getInstance(Clusters.class);
}
-
+
@Test
public void testAddSessionAttributes() throws Exception {
Map<String, Object> attributes = new HashMap<String, Object>();
@@ -216,23 +216,21 @@ public class ClusterImplTest {
Service hdfs = cluster.addService("HDFS");
ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
- nameNode.addServiceComponentHost(hostName1).persist();
+ nameNode.addServiceComponentHost(hostName1);
ServiceComponent dataNode = hdfs.addServiceComponent("DATANODE");
- dataNode.addServiceComponentHost(hostName1).persist();
- dataNode.addServiceComponentHost(hostName2).persist();
+ dataNode.addServiceComponentHost(hostName1);
+ dataNode.addServiceComponentHost(hostName2);
ServiceComponent hdfsClient = hdfs.addServiceComponent("HDFS_CLIENT");
- hdfsClient.addServiceComponentHost(hostName1).persist();
- hdfsClient.addServiceComponentHost(hostName2).persist();
+ hdfsClient.addServiceComponentHost(hostName1);
+ hdfsClient.addServiceComponentHost(hostName2);
Service tez = cluster.addService(serviceToDelete);
ServiceComponent tezClient = tez.addServiceComponent("TEZ_CLIENT");
ServiceComponentHost tezClientHost1 = tezClient.addServiceComponentHost(hostName1);
- tezClientHost1.persist();
ServiceComponentHost tezClientHost2 = tezClient.addServiceComponentHost(hostName2);
- tezClientHost2.persist();
// When
cluster.deleteService(serviceToDelete);
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index 64d8184..dcbc435 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -419,26 +419,16 @@ public class ClusterTest {
sc2CompB.addServiceComponentHost(schHost1Serv2CompB);
sc3CompA.addServiceComponentHost(schHost1Serv3CompA);
sc3CompB.addServiceComponentHost(schHost1Serv3CompB);
- schHost1Serv1CompA.persist();
- schHost1Serv1CompB.persist();
- schHost1Serv1CompC.persist();
- schHost1Serv2CompA.persist();
- schHost1Serv2CompB.persist();
- schHost1Serv3CompA.persist();
- schHost1Serv3CompB.persist();
// Host 2 will have ZK_CLIENT and GANGLIA_MONITOR
ServiceComponentHost schHost2Serv2CompB = serviceComponentHostFactory.createNew(sc2CompB, "h-2");
ServiceComponentHost schHost2Serv3CompB = serviceComponentHostFactory.createNew(sc3CompB, "h-2");
sc2CompB.addServiceComponentHost(schHost2Serv2CompB);
sc3CompB.addServiceComponentHost(schHost2Serv3CompB);
- schHost2Serv2CompB.persist();
- schHost2Serv3CompB.persist();
// Host 3 will have GANGLIA_MONITOR
ServiceComponentHost schHost3Serv3CompB = serviceComponentHostFactory.createNew(sc3CompB, "h-3");
sc3CompB.addServiceComponentHost(schHost3Serv3CompB);
- schHost3Serv3CompB.persist();
// Verify count of components
List<ServiceComponentHost> scHost1 = cluster.getServiceComponentHosts("h-1");
@@ -689,7 +679,6 @@ public class ClusterTest {
ServiceComponentHost sch =
serviceComponentHostFactory.createNew(sc, "h1");
sc.addServiceComponentHost(sch);
- sch.persist();
List<ServiceComponentHost> scHosts = c1.getServiceComponentHosts("h1");
Assert.assertEquals(1, scHosts.size());
@@ -706,7 +695,6 @@ public class ClusterTest {
s1.addServiceComponent(sc1);
ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, "h1");
sc1.addServiceComponentHost(sch1);
- sch1.persist();
}
} catch (ConcurrentModificationException e ) {
Assert.assertTrue("Failed to work concurrently with sch", false);
@@ -727,16 +715,13 @@ public class ClusterTest {
s.addServiceComponent(scNN);
ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
scNN.addServiceComponentHost(schNNH1);
- schNNH1.persist();
ServiceComponent scDN = serviceComponentFactory.createNew(s, "DATANODE");
s.addServiceComponent(scDN);
ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
scDN.addServiceComponentHost(scDNH1);
- scDNH1.persist();
ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2");
scDN.addServiceComponentHost(scDNH2);
- scDNH2.persist();
List<ServiceComponentHost> scHosts;
@@ -758,16 +743,13 @@ public class ClusterTest {
s.addServiceComponent(scNN);
ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
scNN.addServiceComponentHost(schNNH1);
- schNNH1.persist();
ServiceComponent scDN = serviceComponentFactory.createNew(s, "DATANODE");
s.addServiceComponent(scDN);
ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
scDN.addServiceComponentHost(scDNH1);
- scDNH1.persist();
ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2");
scDN.addServiceComponentHost(scDNH2);
- scDNH2.persist();
List<ServiceComponentHost> scHosts;
@@ -795,16 +777,13 @@ public class ClusterTest {
s.addServiceComponent(scNN);
ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
scNN.addServiceComponentHost(schNNH1);
- schNNH1.persist();
ServiceComponent scDN = serviceComponentFactory.createNew(s, "DATANODE");
s.addServiceComponent(scDN);
ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
scDN.addServiceComponentHost(scDNH1);
- scDNH1.persist();
ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2");
scDN.addServiceComponentHost(scDNH2);
- scDNH2.persist();
Map<String, Set<String>> componentHostMap;
@@ -833,22 +812,18 @@ public class ClusterTest {
sfHDFS.addServiceComponent(scNN);
ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
scNN.addServiceComponentHost(schNNH1);
- schNNH1.persist();
ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE");
sfHDFS.addServiceComponent(scDN);
ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
scDN.addServiceComponentHost(scDNH1);
- scDNH1.persist();
ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2");
scDN.addServiceComponentHost(scDNH2);
- scDNH2.persist();
ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER");
sfMR.addServiceComponent(scJT);
ServiceComponentHost schJTH1 = serviceComponentHostFactory.createNew(scJT, "h1");
scJT.addServiceComponentHost(schJTH1);
- schJTH1.persist();
Map<String, Set<String>> componentHostMap;
@@ -893,22 +868,18 @@ public class ClusterTest {
sfHDFS.addServiceComponent(scNN);
ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
scNN.addServiceComponentHost(schNNH1);
- schNNH1.persist();
ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE");
sfHDFS.addServiceComponent(scDN);
ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
scDN.addServiceComponentHost(scDNH1);
- scDNH1.persist();
ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2");
scDN.addServiceComponentHost(scDNH2);
- scDNH2.persist();
ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER");
sfMR.addServiceComponent(scJT);
ServiceComponentHost schJTH1 = serviceComponentHostFactory.createNew(scJT, "h1");
scJT.addServiceComponentHost(schJTH1);
- schJTH1.persist();
Map<String, Set<String>> componentHostMap;
@@ -954,22 +925,18 @@ public class ClusterTest {
sfHDFS.addServiceComponent(scNN);
ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
scNN.addServiceComponentHost(schNNH1);
- schNNH1.persist();
ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE");
sfHDFS.addServiceComponent(scDN);
ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
scDN.addServiceComponentHost(scDNH1);
- scDNH1.persist();
ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2");
scDN.addServiceComponentHost(scDNH2);
- scDNH2.persist();
ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER");
sfMR.addServiceComponent(scJT);
ServiceComponentHost schJTH1 = serviceComponentHostFactory.createNew(scJT, "h1");
scJT.addServiceComponentHost(schJTH1);
- schJTH1.persist();
Map<String, Set<String>> componentHostMap;
@@ -2094,9 +2061,6 @@ public class ClusterTest {
sc2CompA.addServiceComponentHost(schHost4Serv2CompA);
sc2CompB.addServiceComponentHost(schHost4Serv2CompB);
sc3CompB.addServiceComponentHost(schHost4Serv3CompB);
- schHost4Serv2CompA.persist();
- schHost4Serv2CompB.persist();
- schHost4Serv3CompB.persist();
simulateStackVersionListener(stackId, v1, cluster, hostComponentStateDAO.findByHost("h-4"));
@@ -2128,7 +2092,6 @@ public class ClusterTest {
clusters.mapHostToCluster("h-5", clusterName);
ServiceComponentHost schHost5Serv3CompB = serviceComponentHostFactory.createNew(sc3CompB, "h-5");
sc3CompB.addServiceComponentHost(schHost5Serv3CompB);
- schHost5Serv3CompB.persist();
// Host 5 will be in OUT_OF_SYNC, so redistribute bits to it so that it reaches a state of INSTALLED
HostVersionEntity h5Version2 = hostVersionDAO.findByClusterStackVersionAndHost(clusterName, stackId, v2, "h-5");
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
index 3e526d9..27e6e13 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
@@ -420,7 +420,6 @@ public class ClustersDeadlockTest {
sch.setDesiredStackVersion(stackId);
sch.setStackVersion(stackId);
- sch.persist();
return sch;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
index 9e285c6..5886234 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
@@ -449,15 +449,12 @@ public class ClustersTest {
ServiceComponent serviceCheckNode = hdfs.addServiceComponent("HDFS_CLIENT");
ServiceComponentHost nameNodeHost = nameNode.addServiceComponentHost(h1);
- nameNodeHost.persist();
HostEntity nameNodeHostEntity = hostDAO.findByName(nameNodeHost.getHostName());
Assert.assertNotNull(nameNodeHostEntity);
ServiceComponentHost dataNodeHost = dataNode.addServiceComponentHost(h2);
- dataNodeHost.persist();
ServiceComponentHost serviceCheckNodeHost = serviceCheckNode.addServiceComponentHost(h2);
- serviceCheckNodeHost.persist();
serviceCheckNodeHost.setState(State.UNKNOWN);
HostComponentDesiredStateEntityPK hkdspk = new HostComponentDesiredStateEntityPK();
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
index fbed6e2..2500a16 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
@@ -212,7 +212,6 @@ public class ConcurrentServiceConfigVersionTest {
sch.setDesiredStackVersion(stackId);
sch.setStackVersion(stackId);
- sch.persist();
return sch;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
index bb55597..1f09002 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
@@ -241,7 +241,6 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
sch.setDesiredStackVersion(stackId);
sch.setStackVersion(stackId);
- sch.persist();
return sch;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c600829/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
index 48279af..14a8de6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
@@ -195,8 +195,6 @@ public class ServiceComponentHostTest {
ServiceComponentHost impl = serviceComponentHostFactory.createNew(
sc, hostName);
- impl.persist();
-
Assert.assertEquals(State.INIT, impl.getState());
Assert.assertEquals(State.INIT, impl.getDesiredState());
Assert.assertEquals(SecurityState.UNSECURED, impl.getSecurityState());
[10/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/42c61263
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/42c61263
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/42c61263
Branch: refs/heads/trunk
Commit: 42c6126332bc77dc8c0a5366f5e8cac8c7779df2
Parents: 5467ad0 a901d8a
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Oct 5 08:33:46 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Oct 5 08:33:46 2016 -0400
----------------------------------------------------------------------
.../state/cluster/ClustersDeadlockTest.java | 137 +++++---
ambari-web/app/controllers/wizard.js | 14 +-
.../app/controllers/wizard/step7_controller.js | 20 +-
ambari-web/app/utils/config.js | 20 ++
ambari-web/test/controllers/wizard_test.js | 97 +-----
.../src/main/resources/ui/app/components.js | 2 +
.../ui/app/components/labelCapacityBar.js | 12 +-
.../resources/ui/app/components/queueBadge.js | 6 +-
.../ui/app/components/queueHierarchy.js | 11 +
.../resources/ui/app/components/queueMapping.js | 25 +-
.../resources/ui/app/components/queueSummary.js | 23 +-
.../ui/app/components/sunburstChart.js | 339 +++++++++++++++++++
.../ui/app/components/xmldiffViewer.js | 101 ++++++
.../resources/ui/app/controllers/advanced.js | 21 +-
.../resources/ui/app/controllers/capsched.js | 39 ++-
.../resources/ui/app/controllers/editqueue.js | 66 +++-
.../resources/ui/app/controllers/queuesconf.js | 106 +++++-
.../resources/ui/app/controllers/scheduler.js | 21 +-
.../src/main/resources/ui/app/models/queue.js | 9 +-
.../src/main/resources/ui/app/router.js | 49 ++-
.../resources/ui/app/styles/application.less | 59 ++++
.../src/main/resources/ui/app/templates.js | 2 +
.../resources/ui/app/templates/capsched.hbs | 6 +
.../ui/app/templates/capsched/advanced.hbs | 13 +-
.../capsched/partials/accessControlList.hbs | 4 +-
.../capsched/partials/labelCapacity.hbs | 4 +-
.../templates/capsched/partials/preemption.hbs | 4 +-
.../capsched/partials/queueCapacity.hbs | 34 +-
.../capsched/partials/queueResources.hbs | 4 +-
.../ui/app/templates/capsched/queuesconf.hbs | 15 +-
.../ui/app/templates/capsched/scheduler.hbs | 6 +-
.../app/templates/components/queueHierarchy.hbs | 8 +-
.../app/templates/components/queueMapping.hbs | 14 +-
.../app/templates/components/queueSummary.hbs | 13 -
.../app/templates/components/sunburstChart.hbs | 44 +++
.../app/templates/components/xmldiffViewer.hbs | 41 +++
.../src/main/resources/ui/bower.json | 13 +-
37 files changed, 1145 insertions(+), 257 deletions(-)
----------------------------------------------------------------------
[31/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/159ad003
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/159ad003
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/159ad003
Branch: refs/heads/trunk
Commit: 159ad00328af9ed7a7b7130904d53210a7b41146
Parents: 532caef 7be0418
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Oct 18 23:02:47 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Oct 18 23:02:47 2016 -0400
----------------------------------------------------------------------
.gitignore | 1 +
ambari-admin/pom.xml | 2 +-
.../main/resources/ui/admin-web/package.json | 2 +-
ambari-agent/conf/unix/ambari-agent.ini | 1 -
.../src/main/python/ambari_agent/ActionQueue.py | 16 +------
.../ambari_agent/PythonReflectiveExecutor.py | 25 +++-------
.../test/python/ambari_agent/TestActionQueue.py | 3 +-
.../main/python/ambari_commons/thread_utils.py | 43 -----------------
ambari-web/app/styles/common.less | 10 ++--
ambari-web/karma.conf.js | 4 +-
ambari-web/package.json | 9 ++--
ambari-web/pom.xml | 49 ++++++++++++++++----
contrib/views/capacity-scheduler/pom.xml | 4 +-
.../src/main/resources/ui/package.json | 2 +-
contrib/views/files/pom.xml | 4 +-
contrib/views/hawq/pom.xml | 2 +-
contrib/views/hive-next/pom.xml | 4 +-
.../src/main/resources/ui/hive-web/package.json | 2 +-
contrib/views/hive/pom.xml | 4 +-
.../src/main/resources/ui/hive-web/package.json | 2 +-
contrib/views/hueambarimigration/pom.xml | 4 +-
contrib/views/jobs/pom.xml | 4 +-
contrib/views/pig/pom.xml | 4 +-
.../src/main/resources/ui/pig-web/package.json | 2 +-
contrib/views/wfmanager/pom.xml | 4 +-
25 files changed, 83 insertions(+), 124 deletions(-)
----------------------------------------------------------------------
[21/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/18c74125
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/18c74125
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/18c74125
Branch: refs/heads/trunk
Commit: 18c74125a90169a88f712a7a48e15274e0fbb9e8
Parents: 10f44bc 9511638
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Oct 12 10:41:42 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Oct 12 10:41:42 2016 -0400
----------------------------------------------------------------------
.../server/controller/KerberosHelper.java | 40 +++++-
.../server/controller/KerberosHelperImpl.java | 131 +++++++++----------
.../AbstractPrepareKerberosServerAction.java | 38 +++---
.../ConfigureAmbariIdentitiesServerAction.java | 2 +-
.../resources/stacks/HDP/2.0.6/kerberos.json | 40 ++++--
.../stacks/HDP/2.3/services/HDFS/widgets.json | 12 +-
.../server/controller/KerberosHelperTest.java | 84 ++++++++----
7 files changed, 202 insertions(+), 145 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/18c74125/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
----------------------------------------------------------------------
[11/32] ambari git commit: AMBARI-18539 - Remove Unnecessary Locks
Inside Of Host Business Object Implementations (jonathanhurley)
Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java
index eccc1ed..8efec98 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java
@@ -17,15 +17,21 @@
*/
package org.apache.ambari.server.controller.internal;
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-import com.google.inject.persist.PersistService;
-import com.google.inject.persist.Transactional;
-import com.google.inject.util.Modules;
-import junit.framework.Assert;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyString;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
import org.apache.ambari.server.Role;
import org.apache.ambari.server.RoleCommand;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
@@ -74,20 +80,16 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
-import java.lang.reflect.Field;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
+import com.google.inject.Binder;
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.Module;
+import com.google.inject.persist.PersistService;
+import com.google.inject.persist.Transactional;
+import com.google.inject.util.Modules;
-import static org.easymock.EasyMock.anyLong;
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import junit.framework.Assert;
/**
* UpgradeSummaryResourceProvider tests.
@@ -170,7 +172,6 @@ public class UpgradeSummaryResourceProviderTest {
hostAttributes.put("os_release_version", "6.4");
host.setHostAttributes(hostAttributes);
host.setState(HostState.HEALTHY);
- host.persist();
clusters.mapHostToCluster("h1", "c1");
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedStateTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedStateTest.java
index 0d7fc67..3b2460a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedStateTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedStateTest.java
@@ -19,14 +19,15 @@
package org.apache.ambari.server.controller.utilities.state;
+import java.util.HashMap;
+import java.util.Map;
+
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.HostState;
import org.apache.ambari.server.state.ServiceComponent;
import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.State;
import org.junit.Assert;
-import java.util.HashMap;
-import java.util.Map;
public final class DefaultServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest {
@@ -54,7 +55,6 @@ public final class DefaultServiceCalculatedStateTest extends GeneralServiceCalcu
hostAttributes.put("os_release_version", "6.3");
host.setHostAttributes(hostAttributes);
host.setState(HostState.HEALTHY);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
ServiceComponentHost sch = masterComponent.addServiceComponentHost(hostName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedStateTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedStateTest.java
index 1bc4214..fccc5e7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedStateTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedStateTest.java
@@ -18,6 +18,9 @@
package org.apache.ambari.server.controller.utilities.state;
+import java.util.HashMap;
+import java.util.Map;
+
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.HostState;
import org.apache.ambari.server.state.ServiceComponent;
@@ -25,9 +28,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.State;
import org.junit.Assert;
-import java.util.HashMap;
-import java.util.Map;
-
public class FlumeServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest{
@Override
protected String getServiceName() {
@@ -53,7 +53,6 @@ public class FlumeServiceCalculatedStateTest extends GeneralServiceCalculatedSta
hostAttributes.put("os_release_version", "6.3");
host.setHostAttributes(hostAttributes);
host.setState(HostState.HEALTHY);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
ServiceComponentHost sch = masterComponent.addServiceComponentHost(hostName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedStateTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedStateTest.java
index 35a7b67..378f51e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedStateTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedStateTest.java
@@ -18,6 +18,9 @@
package org.apache.ambari.server.controller.utilities.state;
+import java.util.HashMap;
+import java.util.Map;
+
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.HostState;
import org.apache.ambari.server.state.ServiceComponent;
@@ -25,9 +28,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.State;
import org.junit.Assert;
-import java.util.HashMap;
-import java.util.Map;
-
public class HBaseServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest{
@Override
protected String getServiceName() {
@@ -54,7 +54,6 @@ public class HBaseServiceCalculatedStateTest extends GeneralServiceCalculatedSta
hostAttributes.put("os_release_version", "6.3");
host.setHostAttributes(hostAttributes);
host.setState(HostState.HEALTHY);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
ServiceComponentHost sch = clientComponent.addServiceComponentHost(hostName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java
index a562962..99c90cf 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java
@@ -19,6 +19,9 @@
package org.apache.ambari.server.controller.utilities.state;
+import java.util.HashMap;
+import java.util.Map;
+
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.HostState;
import org.apache.ambari.server.state.ServiceComponent;
@@ -26,9 +29,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.State;
import org.junit.Assert;
-import java.util.HashMap;
-import java.util.Map;
-
public class HDFSServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest{
@Override
protected String getServiceName() {
@@ -55,7 +55,6 @@ public class HDFSServiceCalculatedStateTest extends GeneralServiceCalculatedStat
hostAttributes.put("os_release_version", "6.3");
host.setHostAttributes(hostAttributes);
host.setState(HostState.HEALTHY);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
ServiceComponentHost sch = masterComponent.addServiceComponentHost(hostName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedStateTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedStateTest.java
index ddf1503..2c4dec6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedStateTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedStateTest.java
@@ -19,6 +19,9 @@
package org.apache.ambari.server.controller.utilities.state;
+import java.util.HashMap;
+import java.util.Map;
+
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.HostState;
import org.apache.ambari.server.state.ServiceComponent;
@@ -26,9 +29,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.State;
import org.junit.Assert;
-import java.util.HashMap;
-import java.util.Map;
-
public class HiveServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest{
@Override
protected String getServiceName() {
@@ -57,7 +57,6 @@ public class HiveServiceCalculatedStateTest extends GeneralServiceCalculatedStat
hostAttributes.put("os_release_version", "6.3");
host.setHostAttributes(hostAttributes);
host.setState(HostState.HEALTHY);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
ServiceComponentHost sch = clientComponent.addServiceComponentHost(hostName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedStateTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedStateTest.java
index 7e1595e..f6bc2cb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedStateTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedStateTest.java
@@ -18,6 +18,9 @@
package org.apache.ambari.server.controller.utilities.state;
+import java.util.HashMap;
+import java.util.Map;
+
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.HostState;
import org.apache.ambari.server.state.ServiceComponent;
@@ -25,9 +28,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.State;
import org.junit.Assert;
-import java.util.HashMap;
-import java.util.Map;
-
public class OozieServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest{
@Override
protected String getServiceName() {
@@ -53,7 +53,6 @@ public class OozieServiceCalculatedStateTest extends GeneralServiceCalculatedSta
hostAttributes.put("os_release_version", "6.3");
host.setHostAttributes(hostAttributes);
host.setState(HostState.HEALTHY);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
ServiceComponentHost sch = clientComponent.addServiceComponentHost(hostName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/YarnServiceCalculatedStateTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/YarnServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/YarnServiceCalculatedStateTest.java
index b5cce0e..996ce69 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/YarnServiceCalculatedStateTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/YarnServiceCalculatedStateTest.java
@@ -19,6 +19,9 @@
package org.apache.ambari.server.controller.utilities.state;
+import java.util.HashMap;
+import java.util.Map;
+
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.HostState;
import org.apache.ambari.server.state.ServiceComponent;
@@ -26,9 +29,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.State;
import org.junit.Assert;
-import java.util.HashMap;
-import java.util.Map;
-
public class YarnServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest {
@Override
protected String getServiceName() {
@@ -55,7 +55,6 @@ public class YarnServiceCalculatedStateTest extends GeneralServiceCalculatedStat
hostAttributes.put("os_release_version", "6.3");
host.setHostAttributes(hostAttributes);
host.setState(HostState.HEALTHY);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
ServiceComponentHost sch = secondMasterComponent.addServiceComponentHost(hostName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
index ce86f83..bdbaf9b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
@@ -110,7 +110,6 @@ public class EventsTest {
hostAttributes.put("os_release_version", "6.4");
host.setHostAttributes(hostAttributes);
host.setState(HostState.HEALTHY);
- host.persist();
m_cluster = m_clusters.getCluster(m_clusterName);
Assert.assertNotNull(m_cluster);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
index 9135732..9a5802d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
@@ -387,7 +387,6 @@ public class HostVersionOutOfSyncListenerTest {
addHost("h2");
clusters.mapHostToCluster("h2", "c1");
clusters.getHost("h2").setState(HostState.HEALTHY);
- clusters.getHost("h2").persist();
StackId stackId = new StackId(this.stackId);
RepositoryVersionEntity repositoryVersionEntity = helper.getOrCreateRepositoryVersion(stackId,
@@ -445,8 +444,6 @@ public class HostVersionOutOfSyncListenerTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6.4");
host1.setHostAttributes(hostAttributes);
-
- host1.persist();
}
private void addService(Cluster cl, List<String> hostList,
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
index f605276..5280ae6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
@@ -410,7 +410,6 @@ public class OrmTestHelper {
hostAttributes.put("os_release_version", "6.4");
host.setHostAttributes(hostAttributes);
host.setState(HostState.HEALTHY);
- host.persist();
clusters.mapHostToCluster(hostName, cluster.getClusterName());
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
index 207b4c7..d12adde 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
@@ -152,7 +152,6 @@ public class ComponentVersionCheckActionTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
host.setHostAttributes(hostAttributes);
- host.persist();
// Create the starting repo version
m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
@@ -208,7 +207,6 @@ public class ComponentVersionCheckActionTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
host.setHostAttributes(hostAttributes);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
index 5462f7b..480dfb3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
@@ -1694,7 +1694,6 @@ public class ConfigureActionTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
host.setHostAttributes(hostAttributes);
- host.persist();
// Creating starting repo
m_helper.getOrCreateRepositoryVersion(HDP_220_STACK, HDP_2_2_0_0);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index 50ce7b4..d3d8b4c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -191,7 +191,6 @@ public class UpgradeActionTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
host.setHostAttributes(hostAttributes);
- host.persist();
// Create the starting repo version
m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
@@ -229,7 +228,6 @@ public class UpgradeActionTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
host.setHostAttributes(hostAttributes);
- host.persist();
// Create the starting repo version
m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
@@ -295,7 +293,6 @@ public class UpgradeActionTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
host.setHostAttributes(hostAttributes);
- host.persist();
// without this, HostEntity will not have a relation to ClusterEntity
clusters.mapHostsToCluster(Collections.singleton(hostName), clusterName);
@@ -376,7 +373,6 @@ public class UpgradeActionTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6");
host.setHostAttributes(hostAttributes);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
index d8b6a83..80665a5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
@@ -72,8 +72,6 @@ public class ConfigGroupTest {
clusters.addHost("h2");
Assert.assertNotNull(clusters.getHost("h1"));
Assert.assertNotNull(clusters.getHost("h2"));
- clusters.getHost("h1").persist();
- clusters.getHost("h2").persist();
}
@After
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index caa06e5..56ae7ee 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@ -108,8 +108,6 @@ public class ConfigHelperTest {
clusters.addHost("h2");
Assert.assertNotNull(clusters.getHost("h1"));
Assert.assertNotNull(clusters.getHost("h2"));
- clusters.getHost("h1").persist();
- clusters.getHost("h2").persist();
// core-site
ConfigurationRequest cr = new ConfigurationRequest();
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/RequestExecutionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/RequestExecutionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/RequestExecutionTest.java
index c6c37c5..31d3028 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/RequestExecutionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/RequestExecutionTest.java
@@ -74,9 +74,6 @@ public class RequestExecutionTest {
Assert.assertNotNull(clusters.getHost("h1"));
Assert.assertNotNull(clusters.getHost("h2"));
Assert.assertNotNull(clusters.getHost("h3"));
- clusters.getHost("h1").persist();
- clusters.getHost("h2").persist();
- clusters.getHost("h3").persist();
}
@After
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
index b043da1..cfe5d61 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
@@ -18,10 +18,14 @@
package org.apache.ambari.server.state;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-import junit.framework.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.controller.ServiceComponentResponse;
@@ -46,13 +50,11 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
+import junit.framework.Assert;
public class ServiceComponentTest {
@@ -181,12 +183,10 @@ public class ServiceComponentTest {
h.setIPv6(hostname + "ipv6");
Map<String, String> hostAttributes = new HashMap<String, String>();
- hostAttributes.put("os_family", "redhat");
- hostAttributes.put("os_release_version", "6.3");
- h.setHostAttributes(hostAttributes);
-
+ hostAttributes.put("os_family", "redhat");
+ hostAttributes.put("os_release_version", "6.3");
+ h.setHostAttributes(hostAttributes);
- h.persist();
clusters.mapHostToCluster(hostname, clusterName);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
index 8f00b72..65751af 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
@@ -18,7 +18,6 @@
package org.apache.ambari.server.state;
-import junit.framework.Assert;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
@@ -40,6 +39,8 @@ import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.persist.PersistService;
+import junit.framework.Assert;
+
public class ServiceTest {
private Clusters clusters;
@@ -339,8 +340,6 @@ public class ServiceTest {
hostAttributes.put("os_release_version", "6.3");
h.setHostAttributes(hostAttributes);
-
- h.persist();
clusters.mapHostToCluster(hostname, clusterName);
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
index c2e1f75..a1d4c4b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
@@ -508,8 +508,6 @@ public class UpgradeHelperTest {
Clusters clusters = injector.getInstance(Clusters.class);
Host h4 = clusters.getHost("h4");
h4.setState(HostState.HEARTBEAT_LOST);
- h4.persist();
-
List<ServiceComponentHost> schs = cluster.getServiceComponentHosts("h4");
assertEquals(1, schs.size());
@@ -1224,7 +1222,6 @@ public class UpgradeHelperTest {
host.setHostAttributes(hostAttributes);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
}
@@ -1441,7 +1438,6 @@ public class UpgradeHelperTest {
host.setHostAttributes(hostAttributes);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
}
@@ -1521,7 +1517,6 @@ public class UpgradeHelperTest {
host.setHostAttributes(hostAttributes);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
}
@@ -1587,7 +1582,6 @@ public class UpgradeHelperTest {
host.setHostAttributes(hostAttributes);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
}
@@ -1654,7 +1648,6 @@ public class UpgradeHelperTest {
host.setHostAttributes(hostAttributes);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
}
@@ -1773,7 +1766,6 @@ public class UpgradeHelperTest {
host.setHostAttributes(hostAttributes);
- host.persist();
clusters.mapHostToCluster(hostName, clusterName);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
index 2ed5a2d..ab2628b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
@@ -141,7 +141,6 @@ public class ClusterDeadlockTest {
clusters.addHost(hostName);
setOsFamily(clusters.getHost(hostName), "redhat", "6.4");
- clusters.getHost(hostName).persist();
clusters.mapHostToCluster(hostName, "c1");
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
index aca32e4..ba0ff11 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
@@ -18,11 +18,20 @@
package org.apache.ambari.server.state.cluster;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Sets;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.createMockBuilder;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
import org.apache.ambari.server.HostNotFoundException;
import org.apache.ambari.server.controller.AmbariSessionManager;
@@ -35,26 +44,13 @@ import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceComponent;
import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.StackId;
-import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createMockBuilder;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.verify;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Sets;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
public class ClusterImplTest {
@@ -211,11 +207,9 @@ public class ClusterImplTest {
Host host1 = clusters.getHost(hostName1);
host1.setHostAttributes(ImmutableMap.of("os_family", "centos", "os_release_version", "6.0"));
- host1.persist();
Host host2 = clusters.getHost(hostName2);
host2.setHostAttributes(ImmutableMap.of("os_family", "centos", "os_release_version", "6.0"));
- host2.persist();
clusters.mapHostsToCluster(Sets.newHashSet(hostName1, hostName2), clusterName);
@@ -279,11 +273,9 @@ public class ClusterImplTest {
Host host1 = clusters.getHost(hostName1);
host1.setHostAttributes(ImmutableMap.of("os_family", "centos", "os_release_version", "6.0"));
- host1.persist();
Host host2 = clusters.getHost(hostName2);
host2.setHostAttributes(ImmutableMap.of("os_family", "centos", "os_release_version", "6.0"));
- host2.persist();
clusters.mapHostsToCluster(Sets.newHashSet(hostName1, hostName2), clusterName);
@@ -320,11 +312,9 @@ public class ClusterImplTest {
Host host1 = clusters.getHost(hostName1);
host1.setHostAttributes(ImmutableMap.of("os_family", "centos", "os_release_version", "6.0"));
- host1.persist();
Host host2 = clusters.getHost(hostName2);
host2.setHostAttributes(ImmutableMap.of("os_family", "centos", "os_release_version", "6.0"));
- host2.persist();
clusters.mapHostsToCluster(Sets.newHashSet(hostName1, hostName2), clusterName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index 6005ab5..4d39f71 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -238,8 +238,6 @@ public class ClusterTest {
Set<String> hostNames = new HashSet<String>() {{ add("h1"); add("h2"); }};
for (String hostName : hostNames) {
clusters.addHost(hostName);
- Host host = clusters.getHost(hostName);
- host.persist();
HostEntity hostEntity = hostDAO.findByName(hostName);
hostEntity.setIpv4("ipv4");
@@ -510,7 +508,6 @@ public class ClusterTest {
host.setIPv4("ipv4");
host.setIPv6("ipv6");
host.setHostAttributes(hostAttributes);
- host.persist();
}
/**
@@ -1162,7 +1159,6 @@ public class ClusterTest {
host.setState(HostState.HEALTHY);
host.setHealthStatus(new HostHealthStatus(HostHealthStatus.HealthStatus.HEALTHY, ""));
host.setStatus(host.getHealthStatus().getHealthStatus().name());
- host.persist();
c1.setDesiredStackVersion(new StackId("HDP-2.0.6"));
clusters.mapHostToCluster("h3", "c1");
@@ -2316,7 +2312,6 @@ public class ClusterTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "5.9");
h.setHostAttributes(hostAttributes);
- h.persist();
}
String v1 = "2.0.5-1";
@@ -2387,7 +2382,6 @@ public class ClusterTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "5.9");
h.setHostAttributes(hostAttributes);
- h.persist();
}
String v1 = "2.0.5-1";
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
index 190f64d..d2d07b5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
@@ -25,9 +25,6 @@ import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
-import com.google.inject.Provider;
-import junit.framework.Assert;
-
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.ServiceComponentNotFoundException;
import org.apache.ambari.server.ServiceNotFoundException;
@@ -57,9 +54,12 @@ import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.Module;
+import com.google.inject.Provider;
import com.google.inject.persist.PersistService;
import com.google.inject.util.Modules;
+import junit.framework.Assert;
+
/**
* Tests AMBARI-9738 which produced a deadlock during read and writes between
* {@link ClustersImpl} and {@link ClusterImpl}.
@@ -294,7 +294,6 @@ public class ClustersDeadlockTest {
String hostName = "c64-" + hostNameCounter.getAndIncrement();
clusters.addHost(hostName);
setOsFamily(clusters.getHost(hostName), "redhat", "6.4");
- clusters.getHost(hostName).persist();
clusters.mapHostToCluster(hostName, CLUSTER_NAME);
Thread.sleep(10);
@@ -322,7 +321,6 @@ public class ClustersDeadlockTest {
String hostName = "c64-" + hostNameCounter.getAndIncrement();
clusters.addHost(hostName);
setOsFamily(clusters.getHost(hostName), "redhat", "6.4");
- clusters.getHost(hostName).persist();
clusters.mapHostToCluster(hostName, CLUSTER_NAME);
// create DATANODE on this host so that we end up exercising the
@@ -357,7 +355,6 @@ public class ClustersDeadlockTest {
clusters.addHost(hostName);
setOsFamily(clusters.getHost(hostName), "redhat", "6.4");
- clusters.getHost(hostName).persist();
clusters.mapHostToCluster(hostName, CLUSTER_NAME);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
index bb3db03..2f2be0c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
@@ -296,9 +296,6 @@ public class ClustersTest {
setOsFamily(clusters.getHost(h1), "redhat", "6.4");
setOsFamily(clusters.getHost(h2), "redhat", "5.9");
setOsFamily(clusters.getHost(h3), "redhat", "6.4");
- clusters.getHost(h1).persist();
- clusters.getHost(h2).persist();
- clusters.getHost(h3).persist();
try {
clusters.getClustersForHost(h4);
@@ -381,9 +378,6 @@ public class ClustersTest {
setOsFamily(clusters.getHost(h1), "redhat", "6.4");
setOsFamily(clusters.getHost(h2), "redhat", "5.9");
setOsFamily(clusters.getHost(h3), "redhat", "6.4");
- clusters.getHost(h1).persist();
- clusters.getHost(h2).persist();
- clusters.getHost(h3).persist();
clusters.mapHostToCluster(h1, c1);
clusters.mapHostToCluster(h2, c1);
@@ -435,8 +429,6 @@ public class ClustersTest {
Host host2 = clusters.getHost(h2);
setOsFamily(clusters.getHost(h1), "centos", "5.9");
setOsFamily(clusters.getHost(h2), "centos", "5.9");
- host1.persist();
- host2.persist();
clusters.mapHostsToCluster(new HashSet<String>() {
{
@@ -446,7 +438,6 @@ public class ClustersTest {
// host config override
host1.addDesiredConfig(cluster.getClusterId(), true, "_test", config2);
- host1.persist();
Service hdfs = cluster.addService("HDFS");
hdfs.persist();
@@ -705,7 +696,6 @@ public class ClustersTest {
Host host = clusters.getHost(hostName);
setOsFamily(clusters.getHost(hostName), "centos", "5.9");
- host.persist();
Set<String> hostnames = new HashSet<>();
hostnames.add(hostName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
index ff5cbe8..992b8fe 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
@@ -115,7 +115,6 @@ public class ConcurrentServiceConfigVersionTest {
String hostName = "c6401.ambari.apache.org";
clusters.addHost(hostName);
setOsFamily(clusters.getHost(hostName), "redhat", "6.4");
- clusters.getHost(hostName).persist();
clusters.mapHostToCluster(hostName, "c1");
Service service = installService("HDFS");
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
index 7d2ba4d..0f272f6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
@@ -130,7 +130,6 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
String hostName = "c6401";
clusters.addHost(hostName);
setOsFamily(clusters.getHost(hostName), "redhat", "6.4");
- clusters.getHost(hostName).persist();
clusters.mapHostToCluster(hostName, "c1");
Service service = installService("HDFS");
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostImplTest.java
index 4ff9bf4..fa8622d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostImplTest.java
@@ -17,25 +17,21 @@
*/
package org.apache.ambari.server.state.host;
-import com.google.gson.Gson;
-import com.google.inject.Injector;
+import static org.easymock.EasyMock.expect;
+import static org.junit.Assert.assertEquals;
+
+import java.util.Map;
+
import org.apache.ambari.server.orm.dao.HostDAO;
import org.apache.ambari.server.orm.dao.HostStateDAO;
import org.apache.ambari.server.orm.entities.HostEntity;
import org.apache.ambari.server.orm.entities.HostStateEntity;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.HostHealthStatus;
+import org.easymock.EasyMockSupport;
import org.junit.Test;
-import java.util.Map;
-
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.*;
+import com.google.gson.Gson;
-public class HostImplTest {
+public class HostImplTest extends EasyMockSupport {
@Test
public void testGetHostAttributes() throws Exception {
@@ -43,36 +39,30 @@ public class HostImplTest {
HostEntity hostEntity = createNiceMock(HostEntity.class);
HostStateEntity hostStateEntity = createNiceMock(HostStateEntity.class);
HostDAO hostDAO = createNiceMock(HostDAO.class);
- Injector injector = createNiceMock(Injector.class);
HostStateDAO hostStateDAO = createNiceMock(HostStateDAO.class);
-
Gson gson = new Gson();
- expect(injector.getInstance(Gson.class)).andReturn(gson).anyTimes();
- expect(injector.getInstance(HostDAO.class)).andReturn(hostDAO).anyTimes();
- expect(injector.getInstance(HostStateDAO.class)).andReturn(hostStateDAO).anyTimes();
expect(hostEntity.getHostAttributes()).andReturn("{\"foo\": \"aaa\", \"bar\":\"bbb\"}").anyTimes();
expect(hostEntity.getHostId()).andReturn(1L).anyTimes();
expect(hostEntity.getHostName()).andReturn("host1").anyTimes();
expect(hostEntity.getHostStateEntity()).andReturn(hostStateEntity).anyTimes();
- expect(hostDAO.findById(1L)).andReturn(hostEntity).once();
- expect(hostStateDAO.findByHostId(1L)).andReturn(hostStateEntity).once();
+ expect(hostDAO.findById(1L)).andReturn(hostEntity).atLeastOnce();
- replay(hostEntity, hostStateEntity, injector, hostDAO);
- HostImpl host = new HostImpl(hostEntity, false, injector);
+ replayAll();
+ HostImpl host = new HostImpl(hostEntity, gson, hostDAO, hostStateDAO);
Map<String, String> hostAttributes = host.getHostAttributes();
assertEquals("aaa", hostAttributes.get("foo"));
assertEquals("bbb", hostAttributes.get("bar"));
- host = new HostImpl(hostEntity, true, injector);
+ host = new HostImpl(hostEntity, gson, hostDAO, hostStateDAO);
hostAttributes = host.getHostAttributes();
assertEquals("aaa", hostAttributes.get("foo"));
assertEquals("bbb", hostAttributes.get("bar"));
- verify(hostEntity, hostStateEntity, injector, hostDAO);
+ verifyAll();
}
@Test
@@ -82,29 +72,25 @@ public class HostImplTest {
HostStateEntity hostStateEntity = createNiceMock(HostStateEntity.class);
HostDAO hostDAO = createNiceMock(HostDAO.class);
HostStateDAO hostStateDAO = createNiceMock(HostStateDAO.class);
- Injector injector = createNiceMock(Injector.class);
Gson gson = new Gson();
- expect(injector.getInstance(Gson.class)).andReturn(gson).anyTimes();
- expect(injector.getInstance(HostDAO.class)).andReturn(hostDAO).anyTimes();
- expect(injector.getInstance(HostStateDAO.class)).andReturn(hostStateDAO).anyTimes();
expect(hostEntity.getHostAttributes()).andReturn("{\"foo\": \"aaa\", \"bar\":\"bbb\"}").anyTimes();
expect(hostEntity.getHostName()).andReturn("host1").anyTimes();
expect(hostEntity.getHostId()).andReturn(1L).anyTimes();
expect(hostEntity.getHostStateEntity()).andReturn(hostStateEntity).anyTimes();
expect(hostDAO.findById(1L)).andReturn(hostEntity).anyTimes();
- expect(hostStateDAO.findByHostId(1L)).andReturn(hostStateEntity).once();
+ expect(hostStateDAO.findByHostId(1L)).andReturn(hostStateEntity).atLeastOnce();
- replay(hostEntity, hostStateEntity, injector, hostDAO);
- HostImpl host = new HostImpl(hostEntity, false, injector);
+ replayAll();
+ HostImpl host = new HostImpl(hostEntity, gson, hostDAO, hostStateDAO);
host.getHealthStatus();
- host = new HostImpl(hostEntity, true, injector);
+ host = new HostImpl(hostEntity, gson, hostDAO, hostStateDAO);
host.getHealthStatus();
- verify(hostEntity, hostStateEntity, injector, hostDAO);
+ verifyAll();
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
index 99fc0a1..596f381 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
@@ -164,9 +164,11 @@ public class HostTest {
HostRegistrationRequestEvent e =
new HostRegistrationRequestEvent("foo", agentVersion, currentTime,
info, agentEnv);
+
if (!firstReg) {
- Assert.assertTrue(host.isPersisted());
+ Assert.assertNotNull(host.getHostId());
}
+
host.handleEvent(e);
Assert.assertEquals(currentTime, host.getLastRegistrationTime());
@@ -378,7 +380,6 @@ public class HostTest {
hostAttributes.put("os_release_version", "6.3");
host.setHostAttributes(hostAttributes);
- host.persist();
c1.setDesiredStackVersion(stackId);
clusters.mapHostToCluster("h1", "c1");
@@ -437,8 +438,6 @@ public class HostTest {
hostAttributes.put("os_release_version", "6.3");
host.setHostAttributes(hostAttributes);
- host.persist();
-
helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
c1.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
RepositoryVersionState.INSTALLING);
@@ -457,4 +456,19 @@ public class HostTest {
Assert.assertNotNull(stateEntity.getMaintenanceState());
Assert.assertEquals(MaintenanceState.ON, host.getMaintenanceState(c1.getClusterId()));
}
+
+ @Test
+ public void testHostPersist() throws Exception {
+ clusters.addHost("foo");
+ Host host = clusters.getHost("foo");
+
+ String rackInfo = "rackInfo";
+ long lastRegistrationTime = System.currentTimeMillis();
+
+ host.setRackInfo(rackInfo);
+ host.setLastRegistrationTime(lastRegistrationTime);
+
+ Assert.assertEquals(rackInfo, host.getRackInfo());
+ Assert.assertEquals(lastRegistrationTime, host.getLastRegistrationTime());
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
index 243cb4f..7f12eb7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
@@ -150,7 +150,6 @@ public class ServiceComponentHostTest {
clusters.addHost(hostName);
Host host = clusters.getHost(hostName);
host.setHostAttributes(hostAttributes);
- host.persist();
}
clusterEntity.setHostEntities(hostEntities);
[20/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/10f44bca
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/10f44bca
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/10f44bca
Branch: refs/heads/trunk
Commit: 10f44bcadbac846e0719c8d91c81f18a16b2d764
Parents: ed2018b 1d54fef
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Oct 11 21:42:08 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Oct 11 21:42:08 2016 -0400
----------------------------------------------------------------------
.../python/ambari_agent/apscheduler/scheduler.py | 1 +
.../ambari_agent/apscheduler/threadpool.py | 1 +
ambari-server/pom.xml | 3 +++
...rnTimelineServerStatePreservingCheckTest.java | 3 +++
.../server/security/SecurityFilterTest.java | 19 +++++++++++++++----
.../state/cluster/ClusterDeadlockTest.java | 15 ++++++++-------
.../server/testing/DeadlockWarningThread.java | 13 ++++++++++---
.../server/upgrade/UpgradeCatalog150Test.java | 7 ++++++-
.../server/upgrade/UpgradeCatalog151Test.java | 3 +++
.../server/upgrade/UpgradeCatalog160Test.java | 3 +++
.../server/upgrade/UpgradeCatalog161Test.java | 3 +++
.../server/upgrade/UpgradeCatalog170Test.java | 3 +++
12 files changed, 59 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/10f44bca/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
----------------------------------------------------------------------
[12/32] ambari git commit: AMBARI-18539 - Remove Unnecessary Locks
Inside Of Host Business Object Implementations (jonathanhurley)
Posted by jo...@apache.org.
AMBARI-18539 - Remove Unnecessary Locks Inside Of Host Business Object Implementations (jonathanhurley)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/38700445
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/38700445
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/38700445
Branch: refs/heads/trunk
Commit: 38700445bd793d27a8747d4c1d06b70f531ab677
Parents: 42c6126
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Oct 5 15:44:18 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Thu Oct 6 11:33:46 2016 -0400
----------------------------------------------------------------------
.../orm/dao/ConfigGroupHostMappingDAO.java | 137 +--
.../org/apache/ambari/server/state/Host.java | 9 -
.../server/state/cluster/ClustersImpl.java | 4 +-
.../ambari/server/state/host/HostFactory.java | 2 +-
.../ambari/server/state/host/HostImpl.java | 933 ++++++-------------
.../ambari/server/topology/TopologyManager.java | 1 -
.../ExecutionCommandWrapperTest.java | 1 -
.../actionmanager/TestActionDBAccessorImpl.java | 9 -
.../server/actionmanager/TestActionManager.java | 8 +-
.../ambari/server/agent/AgentResourceTest.java | 2 +
.../server/agent/HeartbeatProcessorTest.java | 1 -
.../server/agent/HeartbeatTestHelper.java | 1 -
.../server/agent/TestHeartbeatMonitor.java | 8 -
.../server/checks/InstallPackagesCheckTest.java | 5 +-
.../AmbariCustomCommandExecutionHelperTest.java | 1 -
.../AmbariManagementControllerTest.java | 26 +-
.../BackgroundCustomCommandExecutionTest.java | 1 -
...hYarnCapacitySchedulerReleaseConfigTest.java | 7 +-
.../internal/JMXHostProviderTest.java | 6 -
.../StackDefinedPropertyProviderTest.java | 1 -
.../UpgradeResourceProviderHDP22Test.java | 1 -
.../internal/UpgradeResourceProviderTest.java | 2 -
.../UpgradeSummaryResourceProviderTest.java | 47 +-
.../DefaultServiceCalculatedStateTest.java | 6 +-
.../state/FlumeServiceCalculatedStateTest.java | 7 +-
.../state/HBaseServiceCalculatedStateTest.java | 7 +-
.../state/HDFSServiceCalculatedStateTest.java | 7 +-
.../state/HiveServiceCalculatedStateTest.java | 7 +-
.../state/OozieServiceCalculatedStateTest.java | 7 +-
.../state/YarnServiceCalculatedStateTest.java | 7 +-
.../apache/ambari/server/events/EventsTest.java | 1 -
.../HostVersionOutOfSyncListenerTest.java | 3 -
.../apache/ambari/server/orm/OrmTestHelper.java | 1 -
.../ComponentVersionCheckActionTest.java | 2 -
.../upgrades/ConfigureActionTest.java | 1 -
.../upgrades/UpgradeActionTest.java | 4 -
.../ambari/server/state/ConfigGroupTest.java | 2 -
.../ambari/server/state/ConfigHelperTest.java | 2 -
.../server/state/RequestExecutionTest.java | 3 -
.../server/state/ServiceComponentTest.java | 30 +-
.../apache/ambari/server/state/ServiceTest.java | 5 +-
.../ambari/server/state/UpgradeHelperTest.java | 8 -
.../state/cluster/ClusterDeadlockTest.java | 1 -
.../server/state/cluster/ClusterImplTest.java | 46 +-
.../server/state/cluster/ClusterTest.java | 6 -
.../state/cluster/ClustersDeadlockTest.java | 9 +-
.../server/state/cluster/ClustersTest.java | 10 -
.../ConcurrentServiceConfigVersionTest.java | 1 -
...omponentHostConcurrentWriteDeadlockTest.java | 1 -
.../ambari/server/state/host/HostImplTest.java | 50 +-
.../ambari/server/state/host/HostTest.java | 22 +-
.../svccomphost/ServiceComponentHostTest.java | 1 -
52 files changed, 510 insertions(+), 960 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupHostMappingDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupHostMappingDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupHostMappingDAO.java
index 71d93cc..28b9fea 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupHostMappingDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupHostMappingDAO.java
@@ -17,10 +17,16 @@
*/
package org.apache.ambari.server.orm.dao;
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.Singleton;
-import com.google.inject.persist.Transactional;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.orm.RequiresSession;
@@ -40,16 +46,10 @@ import org.apache.ambari.server.state.host.HostFactory;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.Predicate;
-import javax.persistence.EntityManager;
-import javax.persistence.TypedQuery;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.HashMap;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.google.inject.Singleton;
+import com.google.inject.persist.Transactional;
@Singleton
public class ConfigGroupHostMappingDAO {
@@ -65,16 +65,16 @@ public class ConfigGroupHostMappingDAO {
private HostFactory hostFactory;
@Inject
Clusters clusters;
-
+
private final ReadWriteLock gl = new ReentrantReadWriteLock();
-
+
private Map<Long, Set<ConfigGroupHostMapping>> configGroupHostMappingByHost;
-
+
private volatile boolean cacheLoaded;
-
+
private void populateCache() {
-
+
if (!cacheLoaded) {
gl.writeLock().lock();
try {
@@ -106,10 +106,10 @@ public class ConfigGroupHostMappingDAO {
} finally {
gl.writeLock().unlock();
}
-
+
}
-
+
}
/**
@@ -121,50 +121,51 @@ public class ConfigGroupHostMappingDAO {
@RequiresSession
public ConfigGroupHostMappingEntity findByPK(final ConfigGroupHostMappingEntityPK
configGroupHostMappingEntityPK) {
-
+
return entityManagerProvider.get()
.find(ConfigGroupHostMappingEntity.class, configGroupHostMappingEntityPK);
}
@RequiresSession
public Set<ConfigGroupHostMapping> findByHostId(Long hostId) {
-
+
populateCache();
-
- if (!configGroupHostMappingByHost.containsKey(hostId))
+
+ if (!configGroupHostMappingByHost.containsKey(hostId)) {
return null;
-
+ }
+
Set<ConfigGroupHostMapping> set = new HashSet<ConfigGroupHostMapping>(configGroupHostMappingByHost.get(hostId));
-
+
return set;
-
+
}
@RequiresSession
public Set<ConfigGroupHostMapping> findByGroup(final Long groupId) {
-
+
populateCache();
-
+
Set<ConfigGroupHostMapping> result = new HashSet<ConfigGroupHostMapping>();
-
+
for (Set<ConfigGroupHostMapping> item : configGroupHostMappingByHost.values()) {
-
+
Set<ConfigGroupHostMapping> setByHost = new HashSet<ConfigGroupHostMapping>(item);
-
+
CollectionUtils.filter(setByHost, new Predicate() {
-
+
@Override
public boolean evaluate(Object arg0) {
return ((ConfigGroupHostMapping) arg0).getConfigGroupId().equals(groupId);
}
});
-
+
result.addAll(setByHost);
-
+
}
-
+
return result;
-
+
}
@RequiresSession
@@ -178,33 +179,33 @@ public class ConfigGroupHostMappingDAO {
populateCache();
entityManagerProvider.get().persist(configGroupHostMappingEntity);
-
+
//create in cache
Set<ConfigGroupHostMapping> set = configGroupHostMappingByHost.get(configGroupHostMappingEntity.getHostId());
if (set == null){
set = new HashSet<ConfigGroupHostMapping>();
configGroupHostMappingByHost.put(configGroupHostMappingEntity.getHostId(), set);
}
-
+
set.add(buildConfigGroupHostMapping(configGroupHostMappingEntity));
}
@Transactional
public ConfigGroupHostMappingEntity merge(ConfigGroupHostMappingEntity configGroupHostMappingEntity) {
-
+
populateCache();
-
+
Set<ConfigGroupHostMapping> set = configGroupHostMappingByHost.get(configGroupHostMappingEntity.getHostId());
if (set == null){
set = new HashSet<ConfigGroupHostMapping>();
configGroupHostMappingByHost.put(configGroupHostMappingEntity.getHostId(), set);
}
-
+
//Update object in set
set.remove(buildConfigGroupHostMapping(configGroupHostMappingEntity));
set.add(buildConfigGroupHostMapping(configGroupHostMappingEntity));
-
-
+
+
return entityManagerProvider.get().merge(configGroupHostMappingEntity);
}
@@ -213,23 +214,23 @@ public class ConfigGroupHostMappingDAO {
configGroupHostMappingEntity) {
cacheLoaded = false;
populateCache();
-
+
entityManagerProvider.get().refresh(configGroupHostMappingEntity);
}
@Transactional
public void remove(final ConfigGroupHostMappingEntity
configGroupHostMappingEntity) {
-
+
populateCache();
-
+
entityManagerProvider.get().remove(merge(configGroupHostMappingEntity));
-
+
Set<ConfigGroupHostMapping> setByHost = configGroupHostMappingByHost.get(configGroupHostMappingEntity.getHostId());
-
+
if (setByHost != null) {
CollectionUtils.filter(setByHost, new Predicate() {
-
+
@Override
public boolean evaluate(Object arg0) {
return !((ConfigGroupHostMapping) arg0).getConfigGroupId().
@@ -243,14 +244,14 @@ public class ConfigGroupHostMappingDAO {
public void removeByPK(final ConfigGroupHostMappingEntityPK
configGroupHostMappingEntityPK) {
populateCache();
-
+
entityManagerProvider.get().remove(findByPK(configGroupHostMappingEntityPK));
-
+
Set<ConfigGroupHostMapping> setByHost = configGroupHostMappingByHost.get(configGroupHostMappingEntityPK.getHostId());
-
+
if (setByHost != null) {
CollectionUtils.filter(setByHost, new Predicate() {
-
+
@Override
public boolean evaluate(Object arg0) {
return !((ConfigGroupHostMapping) arg0).getConfigGroupId().
@@ -258,7 +259,7 @@ public class ConfigGroupHostMappingDAO {
}
});
}
-
+
}
@Transactional
@@ -273,18 +274,18 @@ public class ConfigGroupHostMappingDAO {
// Flush to current transaction required in order to avoid Eclipse link
// from re-ordering delete
entityManagerProvider.get().flush();
-
+
for (Set<ConfigGroupHostMapping> setByHost : configGroupHostMappingByHost.values()) {
-
+
CollectionUtils.filter(setByHost, new Predicate() {
-
+
@Override
public boolean evaluate(Object arg0) {
return !((ConfigGroupHostMapping) arg0).getConfigGroupId().equals(groupId);
}
});
}
-
+
}
@Transactional
@@ -294,22 +295,22 @@ public class ConfigGroupHostMappingDAO {
"confighosts.hostId = ?1", String.class);
daoUtils.executeUpdate(query, hostId);
-
-
+
+
Set<ConfigGroupHostMapping> setByHost = configGroupHostMappingByHost.get(hostId);
-
+
setByHost.clear();
}
-
+
private ConfigGroupHostMapping buildConfigGroupHostMapping(
ConfigGroupHostMappingEntity configGroupHostMappingEntity) {
-
+
ConfigGroupHostMappingImpl configGroupHostMapping = new ConfigGroupHostMappingImpl();
configGroupHostMapping.setConfigGroup(buildConfigGroup(configGroupHostMappingEntity.getConfigGroupEntity()));
configGroupHostMapping.setConfigGroupId(configGroupHostMappingEntity.getConfigGroupId());
configGroupHostMapping.setHost(buildHost(configGroupHostMappingEntity.getHostEntity()));
configGroupHostMapping.setHostId(configGroupHostMappingEntity.getHostId());
-
+
return configGroupHostMapping;
}
@@ -321,12 +322,12 @@ public class ConfigGroupHostMappingDAO {
//almost impossible
}
ConfigGroup configGroup = configGroupFactory.createExisting(cluster, configGroupEntity);
-
+
return configGroup;
}
private Host buildHost(HostEntity hostEntity) {
- Host host = hostFactory.create(hostEntity, false);
+ Host host = hostFactory.create(hostEntity);
return host;
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
index bd3f8bf..0a9c82a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
@@ -43,11 +43,6 @@ public interface Host extends Comparable {
Long getHostId();
/**
- * @param hostName the hostName to set
- */
- void setHostName(String hostName);
-
- /**
* @return the currentPingPort
*/
Integer getCurrentPingPort();
@@ -338,10 +333,6 @@ public interface Host extends Comparable {
HostResponse convertToResponse();
- boolean isPersisted();
-
- void persist();
-
void refresh();
void importHostInfo(HostInfo hostInfo);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
index 7b119f2..aff3a3b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
@@ -185,7 +185,7 @@ public class ClustersImpl implements Clusters {
private void loadClustersAndHosts() {
List<HostEntity> hostEntities = hostDAO.findAll();
for (HostEntity hostEntity : hostEntities) {
- Host host = hostFactory.create(hostEntity, true);
+ Host host = hostFactory.create(hostEntity);
hosts.put(hostEntity.getHostName(), host);
hostsById.put(hostEntity.getHostId(), host);
}
@@ -411,7 +411,7 @@ public class ClustersImpl implements Clusters {
hostEntity.setClusterEntities(new ArrayList<ClusterEntity>());
// not stored to DB
- Host host = hostFactory.create(hostEntity, false);
+ Host host = hostFactory.create(hostEntity);
host.setAgentVersion(new AgentVersion(""));
List<DiskInfo> emptyDiskList = new ArrayList<DiskInfo>();
host.setDisksInfo(emptyDiskList);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostFactory.java
index 5cfbbef..ae22c79 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostFactory.java
@@ -21,5 +21,5 @@ import org.apache.ambari.server.orm.entities.HostEntity;
import org.apache.ambari.server.state.Host;
public interface HostFactory {
- Host create(HostEntity hostEntity, boolean persisted);
+ Host create(HostEntity hostEntity);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
index 91b6360..ec8873a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
@@ -22,6 +22,8 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -65,13 +67,13 @@ import org.apache.ambari.server.state.fsm.SingleArcTransition;
import org.apache.ambari.server.state.fsm.StateMachine;
import org.apache.ambari.server.state.fsm.StateMachineFactory;
import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import com.google.inject.Inject;
-import com.google.inject.Injector;
import com.google.inject.assistedinject.Assisted;
import com.google.inject.persist.Transactional;
@@ -96,11 +98,12 @@ public class HostImpl implements Host {
private static final String TIMEZONE = "timezone";
private static final String OS_RELEASE_VERSION = "os_release_version";
-
+ @Inject
private final Gson gson;
private static final Type hostAttributesType =
new TypeToken<Map<String, String>>() {}.getType();
+
private static final Type maintMapType =
new TypeToken<Map<Long, MaintenanceState>>() {}.getType();
@@ -108,26 +111,43 @@ public class HostImpl implements Host {
private final Lock readLock;
private final Lock writeLock;
- // TODO : caching the JPA entities here causes issues if they become stale and get re-merged.
- private HostEntity hostEntity;
- private HostStateEntity hostStateEntity;
-
+ @Inject
private HostDAO hostDAO;
+
+ @Inject
private HostStateDAO hostStateDAO;
+
+ @Inject
private HostVersionDAO hostVersionDAO;
+
+ @Inject
private ClusterDAO clusterDAO;
+
+ @Inject
private Clusters clusters;
+
+ @Inject
private HostConfigMappingDAO hostConfigMappingDAO;
+ /**
+ * The ID of the host which is to retrieve it from JPA.
+ */
+ private final long hostId;
+
+ /**
+ * The name of the host, stored inside of this business object to prevent JPA
+ * lookups since it never changes.
+ */
+ private final String hostName;
+
private long lastHeartbeatTime = 0L;
private AgentEnv lastAgentEnv = null;
private List<DiskInfo> disksInfo = new ArrayList<DiskInfo>();
private RecoveryReport recoveryReport = new RecoveryReport();
- private boolean persisted = false;
private Integer currentPingPort = null;
private final StateMachine<HostState, HostEventType, HostEvent> stateMachine;
- private Map<Long, MaintenanceState> maintMap = null;
+ private final ConcurrentMap<Long, MaintenanceState> maintMap;
// In-memory status, based on host components states
private String status = HealthStatus.UNKNOWN.name();
@@ -141,7 +161,8 @@ public class HostImpl implements Host {
@Inject
private AmbariEventPublisher eventPublisher;
- private static TopologyManager topologyManager;
+ @Inject
+ private TopologyManager topologyManager;
private static final StateMachineFactory
<HostImpl, HostState, HostEventType, HostEvent>
@@ -229,38 +250,49 @@ public class HostImpl implements Host {
.installTopology();
@Inject
- public HostImpl(@Assisted HostEntity hostEntity,
- @Assisted boolean persisted, Injector injector) {
+ public HostImpl(@Assisted HostEntity hostEntity, Gson gson, HostDAO hostDAO, HostStateDAO hostStateDAO) {
+ this.gson = gson;
+ this.hostDAO = hostDAO;
+ this.hostStateDAO = hostStateDAO;
+
stateMachine = stateMachineFactory.make(this);
rwLock = new ReentrantReadWriteLock();
readLock = rwLock.readLock();
writeLock = rwLock.writeLock();
- this.hostEntity = hostEntity;
- this.persisted = persisted;
- hostDAO = injector.getInstance(HostDAO.class);
- hostStateDAO = injector.getInstance(HostStateDAO.class);
- hostVersionDAO = injector.getInstance(HostVersionDAO.class);
- gson = injector.getInstance(Gson.class);
- clusterDAO = injector.getInstance(ClusterDAO.class);
- clusters = injector.getInstance(Clusters.class);
- hostConfigMappingDAO = injector.getInstance(HostConfigMappingDAO.class);
- //todo: proper static injection
- HostImpl.topologyManager = injector.getInstance(TopologyManager.class);
-
- hostStateEntity = hostEntity.getHostStateEntity();
+ HostStateEntity hostStateEntity = hostEntity.getHostStateEntity();
if (hostStateEntity == null) {
hostStateEntity = new HostStateEntity();
hostStateEntity.setHostEntity(hostEntity);
hostEntity.setHostStateEntity(hostStateEntity);
hostStateEntity.setHealthStatus(gson.toJson(new HostHealthStatus(HealthStatus.UNKNOWN, "")));
- if (persisted) {
- hostStateDAO.create(hostStateEntity);
- }
} else {
stateMachine.setCurrentState(hostStateEntity.getCurrentState());
}
+ // persist the host
+ if (null == hostEntity.getHostId()) {
+ persistEntities(hostEntity);
+
+ refresh();
+
+ for (ClusterEntity clusterEntity : hostEntity.getClusterEntities()) {
+ try {
+ clusters.getClusterById(clusterEntity.getClusterId()).refresh();
+ } catch (AmbariException e) {
+ LOG.error("Error while looking up the cluster", e);
+ throw new RuntimeException("Cluster '" + clusterEntity.getClusterId() + "' was removed",
+ e);
+ }
+ }
+ }
+
+ // set the host ID which will be used to retrieve it from JPA
+ hostId = hostEntity.getHostId();
+ hostName = hostEntity.getHostName();
+
+ // populate the maintenance map
+ maintMap = ensureMaintMap(hostEntity.getHostStateEntity());
}
@Override
@@ -296,7 +328,6 @@ public class HostImpl implements Host {
+ ", registrationTime=" + e.registrationTime
+ ", agentVersion=" + agentVersion);
- host.persist();
host.clusters.updateHostMappings(host);
//todo: proper host joined notification
@@ -307,10 +338,10 @@ public class HostImpl implements Host {
associatedWithCluster = false;
} catch (AmbariException e1) {
// only HostNotFoundException is thrown
- e1.printStackTrace();
+ LOG.error("Unable to determine the clusters for host", e1);
}
- topologyManager.onHostRegistered(host, associatedWithCluster);
+ host.topologyManager.onHostRegistered(host, associatedWithCluster);
}
}
@@ -405,7 +436,7 @@ public class HostImpl implements Host {
+ ", lastHeartbeatTime=" + host.getLastHeartbeatTime());
host.setHealthStatus(new HostHealthStatus(HealthStatus.UNKNOWN, host.getHealthStatus().getHealthReport()));
- topologyManager.onHostHeartBeatLost(host);
+ host.topologyManager.onHostHeartBeatLost(host);
}
}
@@ -413,160 +444,120 @@ public class HostImpl implements Host {
* @param hostInfo the host information
*/
@Override
+ @Transactional
public void importHostInfo(HostInfo hostInfo) {
- try {
- writeLock.lock();
-
- if (hostInfo.getIPAddress() != null
- && !hostInfo.getIPAddress().isEmpty()) {
- setIPv4(hostInfo.getIPAddress());
- setIPv6(hostInfo.getIPAddress());
- }
+ if (hostInfo.getIPAddress() != null && !hostInfo.getIPAddress().isEmpty()) {
+ setIPv4(hostInfo.getIPAddress());
+ setIPv6(hostInfo.getIPAddress());
+ }
- setCpuCount(hostInfo.getProcessorCount());
- setPhCpuCount(hostInfo.getPhysicalProcessorCount());
- setTotalMemBytes(hostInfo.getMemoryTotal());
- setAvailableMemBytes(hostInfo.getFreeMemory());
+ setCpuCount(hostInfo.getProcessorCount());
+ setPhCpuCount(hostInfo.getPhysicalProcessorCount());
+ setTotalMemBytes(hostInfo.getMemoryTotal());
+ setAvailableMemBytes(hostInfo.getFreeMemory());
- if (hostInfo.getArchitecture() != null
- && !hostInfo.getArchitecture().isEmpty()) {
- setOsArch(hostInfo.getArchitecture());
- }
+ if (hostInfo.getArchitecture() != null && !hostInfo.getArchitecture().isEmpty()) {
+ setOsArch(hostInfo.getArchitecture());
+ }
- if (hostInfo.getOS() != null
- && !hostInfo.getOS().isEmpty()) {
- String osType = hostInfo.getOS();
- if (hostInfo.getOSRelease() != null) {
- String[] release = hostInfo.getOSRelease().split("\\.");
- if (release.length > 0) {
- osType += release[0];
- }
+ if (hostInfo.getOS() != null && !hostInfo.getOS().isEmpty()) {
+ String osType = hostInfo.getOS();
+ if (hostInfo.getOSRelease() != null) {
+ String[] release = hostInfo.getOSRelease().split("\\.");
+ if (release.length > 0) {
+ osType += release[0];
}
- setOsType(osType.toLowerCase());
- }
-
- if (hostInfo.getMounts() != null
- && !hostInfo.getMounts().isEmpty()) {
- setDisksInfo(hostInfo.getMounts());
}
+ setOsType(osType.toLowerCase());
+ }
- // FIXME add all other information into host attributes
- setAgentVersion(new AgentVersion(
- hostInfo.getAgentUserId()));
-
- Map<String, String> attrs = new HashMap<String, String>();
- if (hostInfo.getHardwareIsa() != null) {
- attrs.put(HARDWAREISA, hostInfo.getHardwareIsa());
- }
- if (hostInfo.getHardwareModel() != null) {
- attrs.put(HARDWAREMODEL, hostInfo.getHardwareModel());
- }
- if (hostInfo.getInterfaces() != null) {
- attrs.put(INTERFACES, hostInfo.getInterfaces());
- }
- if (hostInfo.getKernel() != null) {
- attrs.put(KERNEL, hostInfo.getKernel());
- }
- if (hostInfo.getKernelMajVersion() != null) {
- attrs.put(KERNELMAJOREVERSON, hostInfo.getKernelMajVersion());
- }
- if (hostInfo.getKernelRelease() != null) {
- attrs.put(KERNELRELEASE, hostInfo.getKernelRelease());
- }
- if (hostInfo.getKernelVersion() != null) {
- attrs.put(KERNELVERSION, hostInfo.getKernelVersion());
- }
- if (hostInfo.getMacAddress() != null) {
- attrs.put(MACADDRESS, hostInfo.getMacAddress());
- }
- if (hostInfo.getNetMask() != null) {
- attrs.put(NETMASK, hostInfo.getNetMask());
- }
- if (hostInfo.getOSFamily() != null) {
- attrs.put(OSFAMILY, hostInfo.getOSFamily());
- }
- if (hostInfo.getPhysicalProcessorCount() != 0) {
- attrs.put(PHYSICALPROCESSORCOUNT,
- Long.toString(hostInfo.getPhysicalProcessorCount()));
- }
- if (hostInfo.getProcessorCount() != 0) {
- attrs.put(PROCESSORCOUNT,
- Long.toString(hostInfo.getProcessorCount()));
- }
- if (Boolean.toString(hostInfo.getSeLinux()) != null) {
- attrs.put(SELINUXENABLED, Boolean.toString(hostInfo.getSeLinux()));
- }
- if (hostInfo.getSwapSize() != null) {
- attrs.put(SWAPSIZE, hostInfo.getSwapSize());
- }
- if (hostInfo.getSwapFree() != null) {
- attrs.put(SWAPFREE, hostInfo.getSwapFree());
- }
- if (hostInfo.getTimeZone() != null) {
- attrs.put(TIMEZONE, hostInfo.getTimeZone());
- }
- if (hostInfo.getOSRelease() != null) {
- attrs.put(OS_RELEASE_VERSION, hostInfo.getOSRelease());
- }
+ if (hostInfo.getMounts() != null && !hostInfo.getMounts().isEmpty()) {
+ setDisksInfo(hostInfo.getMounts());
+ }
- setHostAttributes(attrs);
+ // FIXME add all other information into host attributes
+ setAgentVersion(new AgentVersion(hostInfo.getAgentUserId()));
- saveIfPersisted();
+ Map<String, String> attrs = new HashMap<String, String>();
+ if (hostInfo.getHardwareIsa() != null) {
+ attrs.put(HARDWAREISA, hostInfo.getHardwareIsa());
}
- finally {
- writeLock.unlock();
+ if (hostInfo.getHardwareModel() != null) {
+ attrs.put(HARDWAREMODEL, hostInfo.getHardwareModel());
+ }
+ if (hostInfo.getInterfaces() != null) {
+ attrs.put(INTERFACES, hostInfo.getInterfaces());
+ }
+ if (hostInfo.getKernel() != null) {
+ attrs.put(KERNEL, hostInfo.getKernel());
+ }
+ if (hostInfo.getKernelMajVersion() != null) {
+ attrs.put(KERNELMAJOREVERSON, hostInfo.getKernelMajVersion());
+ }
+ if (hostInfo.getKernelRelease() != null) {
+ attrs.put(KERNELRELEASE, hostInfo.getKernelRelease());
+ }
+ if (hostInfo.getKernelVersion() != null) {
+ attrs.put(KERNELVERSION, hostInfo.getKernelVersion());
+ }
+ if (hostInfo.getMacAddress() != null) {
+ attrs.put(MACADDRESS, hostInfo.getMacAddress());
+ }
+ if (hostInfo.getNetMask() != null) {
+ attrs.put(NETMASK, hostInfo.getNetMask());
+ }
+ if (hostInfo.getOSFamily() != null) {
+ attrs.put(OSFAMILY, hostInfo.getOSFamily());
}
+ if (hostInfo.getPhysicalProcessorCount() != 0) {
+ attrs.put(PHYSICALPROCESSORCOUNT, Long.toString(hostInfo.getPhysicalProcessorCount()));
+ }
+ if (hostInfo.getProcessorCount() != 0) {
+ attrs.put(PROCESSORCOUNT, Long.toString(hostInfo.getProcessorCount()));
+ }
+ if (Boolean.toString(hostInfo.getSeLinux()) != null) {
+ attrs.put(SELINUXENABLED, Boolean.toString(hostInfo.getSeLinux()));
+ }
+ if (hostInfo.getSwapSize() != null) {
+ attrs.put(SWAPSIZE, hostInfo.getSwapSize());
+ }
+ if (hostInfo.getSwapFree() != null) {
+ attrs.put(SWAPFREE, hostInfo.getSwapFree());
+ }
+ if (hostInfo.getTimeZone() != null) {
+ attrs.put(TIMEZONE, hostInfo.getTimeZone());
+ }
+ if (hostInfo.getOSRelease() != null) {
+ attrs.put(OS_RELEASE_VERSION, hostInfo.getOSRelease());
+ }
+
+ setHostAttributes(attrs);
}
@Override
public void setLastAgentEnv(AgentEnv env) {
- writeLock.lock();
- try {
- lastAgentEnv = env;
- } finally {
- writeLock.unlock();
- }
-
+ lastAgentEnv = env;
}
@Override
public AgentEnv getLastAgentEnv() {
- readLock.lock();
- try {
- return lastAgentEnv;
- } finally {
- readLock.unlock();
- }
-
+ return lastAgentEnv;
}
@Override
public HostState getState() {
- try {
- readLock.lock();
- return stateMachine.getCurrentState();
- }
- finally {
- readLock.unlock();
- }
+ return stateMachine.getCurrentState();
}
@Override
public void setState(HostState state) {
- try {
- writeLock.lock();
- stateMachine.setCurrentState(state);
-
- HostStateEntity hostStateEntity = getHostStateEntity();
+ stateMachine.setCurrentState(state);
+ HostStateEntity hostStateEntity = getHostStateEntity();
- if (hostStateEntity != null) {
- hostStateEntity.setCurrentState(state);
- hostStateEntity.setTimeInState(System.currentTimeMillis());
- saveIfPersisted();
- }
- }
- finally {
- writeLock.unlock();
+ if (hostStateEntity != null) {
+ hostStateEntity.setCurrentState(state);
+ hostStateEntity.setTimeInState(System.currentTimeMillis());
+ hostStateDAO.merge(hostStateEntity);
}
}
@@ -608,268 +599,146 @@ public class HostImpl implements Host {
@Override
public String getHostName() {
- // Not an updatable attribute - No locking necessary
- return hostEntity.getHostName();
+ return hostName;
}
@Override
public Long getHostId() {
- return hostEntity.getHostId();
- }
-
- @Override
- public void setHostName(String hostName) {
- try {
- writeLock.lock();
- if (!isPersisted()) {
- hostEntity.setHostName(hostName);
- } else {
- throw new UnsupportedOperationException("PK of persisted entity cannot be modified");
- }
- } finally {
- writeLock.unlock();
- }
+ return hostId;
}
@Override
public Integer getCurrentPingPort() {
- try {
- readLock.lock();
- return currentPingPort;
- }
- finally {
- readLock.unlock();
- }
+ return currentPingPort;
}
@Override
public void setCurrentPingPort(Integer currentPingPort) {
- try {
- writeLock.lock();
- this.currentPingPort = currentPingPort;
- }
- finally {
- writeLock.unlock();
- }
+ this.currentPingPort = currentPingPort;
}
@Override
public void setPublicHostName(String hostName) {
- try {
- writeLock.lock();
- getHostEntity().setPublicHostName(hostName);
- saveIfPersisted();
- }
- finally {
- writeLock.unlock();
- }
+ HostEntity hostEntity = getHostEntity();
+ hostEntity.setPublicHostName(hostName);
+ hostDAO.merge(hostEntity);
}
@Override
public String getPublicHostName() {
- try {
- readLock.lock();
- return getHostEntity().getPublicHostName();
- }
- finally {
- readLock.unlock();
- }
+ return getHostEntity().getPublicHostName();
}
@Override
public String getIPv4() {
- try {
- readLock.lock();
- return getHostEntity().getIpv4();
- } finally {
- readLock.unlock();
- }
+ return getHostEntity().getIpv4();
}
@Override
public void setIPv4(String ip) {
- try {
- writeLock.lock();
- getHostEntity().setIpv4(ip);
- saveIfPersisted();
- } finally {
- writeLock.unlock();
- }
+ HostEntity hostEntity = getHostEntity();
+ hostEntity.setIpv4(ip);
+ hostDAO.merge(hostEntity);
}
@Override
public String getIPv6() {
- try {
- readLock.lock();
- return getHostEntity().getIpv6();
- } finally {
- readLock.unlock();
- }
+ return getHostEntity().getIpv6();
}
@Override
public void setIPv6(String ip) {
- try {
- writeLock.lock();
- getHostEntity().setIpv6(ip);
- saveIfPersisted();
- } finally {
- writeLock.unlock();
- }
+ HostEntity hostEntity = getHostEntity();
+ hostEntity.setIpv6(ip);
+ hostDAO.merge(hostEntity);
}
@Override
public int getCpuCount() {
- try {
- readLock.lock();
- return getHostEntity().getCpuCount();
- } finally {
- readLock.unlock();
- }
+ return getHostEntity().getCpuCount();
}
@Override
public void setCpuCount(int cpuCount) {
- try {
- writeLock.lock();
- getHostEntity().setCpuCount(cpuCount);
- saveIfPersisted();
- } finally {
- writeLock.unlock();
- }
+ HostEntity hostEntity = getHostEntity();
+ hostEntity.setCpuCount(cpuCount);
+ hostDAO.merge(hostEntity);
}
@Override
public int getPhCpuCount() {
- try {
- readLock.lock();
- return getHostEntity().getPhCpuCount();
- } finally {
- readLock.unlock();
- }
+ return getHostEntity().getPhCpuCount();
}
@Override
public void setPhCpuCount(int phCpuCount) {
- try {
- writeLock.lock();
- getHostEntity().setPhCpuCount(phCpuCount);
- saveIfPersisted();
- } finally {
- writeLock.unlock();
- }
+ HostEntity hostEntity = getHostEntity();
+ hostEntity.setPhCpuCount(phCpuCount);
+ hostDAO.merge(hostEntity);
}
@Override
public long getTotalMemBytes() {
- try {
- readLock.lock();
- return getHostEntity().getTotalMem();
- } finally {
- readLock.unlock();
- }
+ return getHostEntity().getTotalMem();
}
@Override
public void setTotalMemBytes(long totalMemBytes) {
- try {
- writeLock.lock();
- getHostEntity().setTotalMem(totalMemBytes);
- saveIfPersisted();
- } finally {
- writeLock.unlock();
- }
+ HostEntity hostEntity = getHostEntity();
+ hostEntity.setTotalMem(totalMemBytes);
+ hostDAO.merge(hostEntity);
}
@Override
public long getAvailableMemBytes() {
- try {
- readLock.lock();
- HostStateEntity hostStateEntity = getHostStateEntity();
- return hostStateEntity != null ? hostStateEntity.getAvailableMem() : null;
- }
- finally {
- readLock.unlock();
- }
+ HostStateEntity hostStateEntity = getHostStateEntity();
+ return hostStateEntity != null ? hostStateEntity.getAvailableMem() : null;
}
@Override
public void setAvailableMemBytes(long availableMemBytes) {
- try {
- writeLock.lock();
- HostStateEntity hostStateEntity = getHostStateEntity();
- if (hostStateEntity != null) {
- getHostStateEntity().setAvailableMem(availableMemBytes);
- saveIfPersisted();
- }
- }
- finally {
- writeLock.unlock();
+ HostStateEntity hostStateEntity = getHostStateEntity();
+ if (hostStateEntity != null) {
+ hostStateEntity.setAvailableMem(availableMemBytes);
+ hostStateDAO.merge(hostStateEntity);
}
}
@Override
public String getOsArch() {
- try {
- readLock.lock();
- return getHostEntity().getOsArch();
- } finally {
- readLock.unlock();
- }
+ return getHostEntity().getOsArch();
}
@Override
public void setOsArch(String osArch) {
- try {
- writeLock.lock();
- getHostEntity().setOsArch(osArch);
- saveIfPersisted();
- } finally {
- writeLock.unlock();
- }
+ HostEntity hostEntity = getHostEntity();
+ hostEntity.setOsArch(osArch);
+ hostDAO.merge(hostEntity);
}
@Override
public String getOsInfo() {
- try {
- readLock.lock();
- return getHostEntity().getOsInfo();
- } finally {
- readLock.unlock();
- }
+ return getHostEntity().getOsInfo();
}
@Override
public void setOsInfo(String osInfo) {
- try {
- writeLock.lock();
- getHostEntity().setOsInfo(osInfo);
- saveIfPersisted();
- } finally {
- writeLock.unlock();
- }
+ HostEntity hostEntity = getHostEntity();
+ hostEntity.setOsInfo(osInfo);
+ hostDAO.merge(hostEntity);
}
@Override
public String getOsType() {
- try {
- readLock.lock();
- return getHostEntity().getOsType();
- } finally {
- readLock.unlock();
- }
+ return getHostEntity().getOsType();
}
@Override
public void setOsType(String osType) {
- try {
- writeLock.lock();
- getHostEntity().setOsType(osType);
- saveIfPersisted();
- } finally {
- writeLock.unlock();
- }
+ HostEntity hostEntity = getHostEntity();
+ hostEntity.setOsType(osType);
+ hostDAO.merge(hostEntity);
}
@Override
@@ -881,212 +750,129 @@ public class HostImpl implements Host {
@Override
public List<DiskInfo> getDisksInfo() {
- try {
- readLock.lock();
- return disksInfo;
- } finally {
- readLock.unlock();
- }
+ return disksInfo;
}
@Override
public void setDisksInfo(List<DiskInfo> disksInfo) {
- try {
- writeLock.lock();
- this.disksInfo = disksInfo;
- } finally {
- writeLock.unlock();
- }
+ this.disksInfo = disksInfo;
}
@Override
public RecoveryReport getRecoveryReport() {
- try {
- readLock.lock();
- return recoveryReport;
- } finally {
- readLock.unlock();
- }
+ return recoveryReport;
}
@Override
public void setRecoveryReport(RecoveryReport recoveryReport) {
- try {
- writeLock.lock();
- this.recoveryReport = recoveryReport;
- } finally {
- writeLock.unlock();
- }
+ this.recoveryReport = recoveryReport;
}
@Override
public HostHealthStatus getHealthStatus() {
- try {
- readLock.lock();
- HostStateEntity hostStateEntity = getHostStateEntity();
- if (hostStateEntity != null) {
- return gson.fromJson(hostStateEntity.getHealthStatus(), HostHealthStatus.class);
- }
- return null;
- } finally {
- readLock.unlock();
+ HostStateEntity hostStateEntity = getHostStateEntity();
+ if (hostStateEntity != null) {
+ return gson.fromJson(hostStateEntity.getHealthStatus(), HostHealthStatus.class);
}
+
+ return null;
}
@Override
public void setHealthStatus(HostHealthStatus healthStatus) {
- try {
- writeLock.lock();
- HostStateEntity hostStateEntity = getHostStateEntity();
- if (hostStateEntity != null) {
- hostStateEntity.setHealthStatus(gson.toJson(healthStatus));
-
- if (healthStatus.getHealthStatus().equals(HealthStatus.UNKNOWN)) {
- setStatus(HealthStatus.UNKNOWN.name());
- }
+ HostStateEntity hostStateEntity = getHostStateEntity();
+ if (hostStateEntity != null) {
+ hostStateEntity.setHealthStatus(gson.toJson(healthStatus));
- saveIfPersisted();
+ if (healthStatus.getHealthStatus().equals(HealthStatus.UNKNOWN)) {
+ setStatus(HealthStatus.UNKNOWN.name());
}
- } finally {
- writeLock.unlock();
+
+ hostStateDAO.merge(hostStateEntity);
}
}
@Override
- public String getPrefix() { return prefix; }
+ public String getPrefix() {
+ return prefix;
+ }
@Override
public void setPrefix(String prefix) {
- if (prefix != null && !prefix.equals(this.prefix)) {
- try {
- writeLock.lock();
- this.prefix = prefix;
- } finally {
- writeLock.unlock();
- }
+ if (StringUtils.isNotBlank(prefix) && !StringUtils.equals(this.prefix, prefix)) {
+ this.prefix = prefix;
}
}
@Override
public Map<String, String> getHostAttributes() {
- try {
- readLock.lock();
- return gson.fromJson(getHostEntity().getHostAttributes(),
- hostAttributesType);
- } finally {
- readLock.unlock();
- }
+ return gson.fromJson(getHostEntity().getHostAttributes(), hostAttributesType);
}
@Override
public void setHostAttributes(Map<String, String> hostAttributes) {
- try {
- writeLock.lock();
- HostEntity hostEntity = getHostEntity();
- Map<String, String> hostAttrs = gson.fromJson(hostEntity.getHostAttributes(), hostAttributesType);
- if (hostAttrs == null) {
- hostAttrs = new HashMap<String, String>();
- }
- hostAttrs.putAll(hostAttributes);
- hostEntity.setHostAttributes(gson.toJson(hostAttrs,hostAttributesType));
- saveIfPersisted();
- } finally {
- writeLock.unlock();
+ HostEntity hostEntity = getHostEntity();
+ Map<String, String> hostAttrs = gson.fromJson(hostEntity.getHostAttributes(), hostAttributesType);
+
+ if (hostAttrs == null) {
+ hostAttrs = new ConcurrentHashMap<String, String>();
}
+
+ hostAttrs.putAll(hostAttributes);
+ hostEntity.setHostAttributes(gson.toJson(hostAttrs,hostAttributesType));
+ hostDAO.merge(hostEntity);
}
@Override
public String getRackInfo() {
- try {
- readLock.lock();
- return getHostEntity().getRackInfo();
- } finally {
- readLock.unlock();
- }
+ return getHostEntity().getRackInfo();
}
@Override
public void setRackInfo(String rackInfo) {
- try {
- writeLock.lock();
- getHostEntity().setRackInfo(rackInfo);
- saveIfPersisted();
- } finally {
- writeLock.unlock();
- }
+ HostEntity hostEntity = getHostEntity();
+ hostEntity.setRackInfo(rackInfo);
+ hostDAO.merge(hostEntity);
}
@Override
public long getLastRegistrationTime() {
- try {
- readLock.lock();
- return getHostEntity().getLastRegistrationTime();
- } finally {
- readLock.unlock();
- }
+ return getHostEntity().getLastRegistrationTime();
}
@Override
public void setLastRegistrationTime(long lastRegistrationTime) {
- try {
- writeLock.lock();
- getHostEntity().setLastRegistrationTime(lastRegistrationTime);
- saveIfPersisted();
- } finally {
- writeLock.unlock();
- }
+ HostEntity hostEntity = getHostEntity();
+ hostEntity.setLastRegistrationTime(lastRegistrationTime);
+ hostDAO.merge(hostEntity);
}
@Override
public long getLastHeartbeatTime() {
- try {
- readLock.lock();
- return lastHeartbeatTime;
- }
- finally {
- readLock.unlock();
- }
+ return lastHeartbeatTime;
}
@Override
public void setLastHeartbeatTime(long lastHeartbeatTime) {
- try {
- writeLock.lock();
- this.lastHeartbeatTime = lastHeartbeatTime;
- }
- finally {
- writeLock.unlock();
- }
+ this.lastHeartbeatTime = lastHeartbeatTime;
}
@Override
public AgentVersion getAgentVersion() {
- try {
- readLock.lock();
- HostStateEntity hostStateEntity = getHostStateEntity();
- if (hostStateEntity != null) {
- return gson.fromJson(getHostStateEntity().getAgentVersion(),
- AgentVersion.class);
- }
- return null;
- }
- finally {
- readLock.unlock();
+ HostStateEntity hostStateEntity = getHostStateEntity();
+ if (hostStateEntity != null) {
+ return gson.fromJson(hostStateEntity.getAgentVersion(), AgentVersion.class);
}
+
+ return null;
}
@Override
public void setAgentVersion(AgentVersion agentVersion) {
- try {
- writeLock.lock();
- HostStateEntity hostStateEntity = getHostStateEntity();
- if (hostStateEntity != null) {
- getHostStateEntity().setAgentVersion(gson.toJson(agentVersion));
- saveIfPersisted();
- }
- }
- finally {
- writeLock.unlock();
+ HostStateEntity hostStateEntity = getHostStateEntity();
+ if (hostStateEntity != null) {
+ hostStateEntity.setAgentVersion(gson.toJson(agentVersion));
+ hostStateDAO.merge(hostStateEntity);
}
}
@@ -1098,16 +884,10 @@ public class HostImpl implements Host {
@Override
public void setTimeInState(long timeInState) {
- try {
- writeLock.lock();
- HostStateEntity hostStateEntity = getHostStateEntity();
- if (hostStateEntity != null) {
- getHostStateEntity().setTimeInState(timeInState);
- saveIfPersisted();
- }
- }
- finally {
- writeLock.unlock();
+ HostStateEntity hostStateEntity = getHostStateEntity();
+ if (hostStateEntity != null) {
+ hostStateEntity.setTimeInState(timeInState);
+ hostStateDAO.merge(hostStateEntity);
}
}
@@ -1119,14 +899,7 @@ public class HostImpl implements Host {
@Override
public void setStatus(String status) {
- if (status != null && !status.equals(this.status)) {
- try {
- writeLock.lock();
- this.status = status;
- } finally {
- writeLock.unlock();
- }
- }
+ this.status = status;
}
@Override
@@ -1148,95 +921,38 @@ public class HostImpl implements Host {
return (null == getHostName() ? 0 : getHostName().hashCode());
}
- public int compareTo(HostEntity other) {
- return getHostName().compareTo(other.getHostName());
- }
-
@Override
public HostResponse convertToResponse() {
- try {
- readLock.lock();
- HostResponse r = new HostResponse(getHostName());
-
- r.setAgentVersion(getAgentVersion());
- r.setAvailableMemBytes(getAvailableMemBytes());
- r.setPhCpuCount(getPhCpuCount());
- r.setCpuCount(getCpuCount());
- r.setDisksInfo(getDisksInfo());
- r.setHealthStatus(getHealthStatus());
- r.setHostAttributes(getHostAttributes());
- r.setIpv4(getIPv4());
- r.setIpv6(getIPv6());
- r.setLastHeartbeatTime(getLastHeartbeatTime());
- r.setLastAgentEnv(lastAgentEnv);
- r.setLastRegistrationTime(getLastRegistrationTime());
- r.setOsArch(getOsArch());
- r.setOsInfo(getOsInfo());
- r.setOsType(getOsType());
- r.setRackInfo(getRackInfo());
- r.setTotalMemBytes(getTotalMemBytes());
- r.setPublicHostName(getPublicHostName());
- r.setHostState(getState().toString());
- r.setStatus(getStatus());
- r.setRecoveryReport(getRecoveryReport());
- r.setRecoverySummary(getRecoveryReport().getSummary());
-
- return r;
- }
- finally {
- readLock.unlock();
- }
- }
-
- /**
- * Shows if Host is persisted to database
- *
- * @return true if persisted
- */
- @Override
- public boolean isPersisted() {
- readLock.lock();
- try {
- return persisted;
- } finally {
- readLock.unlock();
- }
- }
-
- /**
- * Save host to database and make all changes to be saved afterwards
- */
- @Override
- public void persist() {
- writeLock.lock();
- try {
- if (!persisted) {
- persistEntities();
- refresh();
- for (ClusterEntity clusterEntity : hostEntity.getClusterEntities()) {
- try {
- clusters.getClusterById(clusterEntity.getClusterId()).refresh();
- } catch (AmbariException e) {
- LOG.error("Error while looking up the cluster", e);
- throw new RuntimeException("Cluster '" + clusterEntity.getClusterId() + "' was removed", e);
- }
- }
- persisted = true;
- } else {
- //refresh entities from active session
- getHostEntity();
- getHostStateEntity();
- saveIfPersisted();
- }
- } finally {
- writeLock.unlock();
- }
+ HostResponse r = new HostResponse(getHostName());
+
+ r.setAgentVersion(getAgentVersion());
+ r.setAvailableMemBytes(getAvailableMemBytes());
+ r.setPhCpuCount(getPhCpuCount());
+ r.setCpuCount(getCpuCount());
+ r.setDisksInfo(getDisksInfo());
+ r.setHealthStatus(getHealthStatus());
+ r.setHostAttributes(getHostAttributes());
+ r.setIpv4(getIPv4());
+ r.setIpv6(getIPv6());
+ r.setLastHeartbeatTime(getLastHeartbeatTime());
+ r.setLastAgentEnv(lastAgentEnv);
+ r.setLastRegistrationTime(getLastRegistrationTime());
+ r.setOsArch(getOsArch());
+ r.setOsInfo(getOsInfo());
+ r.setOsType(getOsType());
+ r.setRackInfo(getRackInfo());
+ r.setTotalMemBytes(getTotalMemBytes());
+ r.setPublicHostName(getPublicHostName());
+ r.setHostState(getState().toString());
+ r.setStatus(getStatus());
+ r.setRecoveryReport(getRecoveryReport());
+ r.setRecoverySummary(getRecoveryReport().getSummary());
+ return r;
}
@Transactional
- void persistEntities() {
+ private void persistEntities(HostEntity hostEntity) {
hostDAO.create(hostEntity);
- hostStateDAO.create(hostStateEntity);
if (!hostEntity.getClusterEntities().isEmpty()) {
for (ClusterEntity clusterEntity : hostEntity.getClusterEntities()) {
clusterEntity.getHostEntities().add(hostEntity);
@@ -1256,14 +972,6 @@ public class HostImpl implements Host {
}
}
- @Transactional
- void saveIfPersisted() {
- if (isPersisted()) {
- hostDAO.merge(hostEntity);
- hostStateDAO.merge(hostStateEntity);
- }
- }
-
@Override
@Transactional
public boolean addDesiredConfig(long clusterId, boolean selected, String user, Config config) {
@@ -1317,7 +1025,7 @@ public class HostImpl implements Host {
Map<String, DesiredConfig> map = new HashMap<String, DesiredConfig>();
for (HostConfigMapping e : hostConfigMappingDAO.findSelected(
- clusterId, hostEntity.getHostId())) {
+ clusterId, getHostId())) {
DesiredConfig dc = new DesiredConfig();
dc.setTag(e.getVersion());
@@ -1388,66 +1096,50 @@ public class HostImpl implements Host {
}
private HostConfigMapping getDesiredConfigEntity(long clusterId, String type) {
- return hostConfigMappingDAO.findSelectedByType(clusterId, hostEntity.getHostId(), type);
- }
-
- private void ensureMaintMap() {
- if (null == maintMap) {
- HostStateEntity hostStateEntity = getHostStateEntity();
- if (hostStateEntity != null) {
- String entity = hostStateEntity.getMaintenanceState();
- if (null == entity) {
- maintMap = new HashMap<Long, MaintenanceState>();
- } else {
- try {
- maintMap = gson.fromJson(entity, maintMapType);
- } catch (Exception e) {
- maintMap = new HashMap<Long, MaintenanceState>();
- }
- }
- }
- }
+ return hostConfigMappingDAO.findSelectedByType(clusterId, getHostId(), type);
}
- @Override
- public void setMaintenanceState(long clusterId, MaintenanceState state) {
+ private ConcurrentMap<Long, MaintenanceState> ensureMaintMap(HostStateEntity hostStateEntity) {
+ if (null == hostStateEntity || null == hostStateEntity.getMaintenanceState()) {
+ return new ConcurrentHashMap<>();
+ }
+
+ String entity = hostStateEntity.getMaintenanceState();
+ final ConcurrentMap<Long, MaintenanceState> map;
+
try {
- writeLock.lock();
+ Map<Long, MaintenanceState> gsonMap = gson.fromJson(entity, maintMapType);
+ map = new ConcurrentHashMap<>(gsonMap);
+ } catch (Exception e) {
+ return new ConcurrentHashMap<>();
+ }
- ensureMaintMap();
+ return map;
+ }
- maintMap.put(clusterId, state);
- String json = gson.toJson(maintMap, maintMapType);
+ @Override
+ public void setMaintenanceState(long clusterId, MaintenanceState state) {
+ maintMap.put(clusterId, state);
+ String json = gson.toJson(maintMap, maintMapType);
- HostStateEntity hostStateEntity = getHostStateEntity();
- if (hostStateEntity != null) {
- getHostStateEntity().setMaintenanceState(json);
- saveIfPersisted();
+ HostStateEntity hostStateEntity = getHostStateEntity();
+ if (hostStateEntity != null) {
+ hostStateEntity.setMaintenanceState(json);
+ hostStateDAO.merge(hostStateEntity);
- // broadcast the maintenance mode change
- MaintenanceModeEvent event = new MaintenanceModeEvent(state, this);
- eventPublisher.publish(event);
- }
- } finally {
- writeLock.unlock();
+ // broadcast the maintenance mode change
+ MaintenanceModeEvent event = new MaintenanceModeEvent(state, this);
+ eventPublisher.publish(event);
}
}
@Override
public MaintenanceState getMaintenanceState(long clusterId) {
- try {
- readLock.lock();
-
- ensureMaintMap();
-
- if (!maintMap.containsKey(clusterId)) {
- maintMap.put(clusterId, MaintenanceState.OFF);
- }
-
- return maintMap.get(clusterId);
- } finally {
- readLock.unlock();
+ if (!maintMap.containsKey(clusterId)) {
+ maintMap.put(clusterId, MaintenanceState.OFF);
}
+
+ return maintMap.get(clusterId);
}
/**
@@ -1462,20 +1154,13 @@ public class HostImpl implements Host {
// Get the cached host entity or load it fresh through the DAO.
public HostEntity getHostEntity() {
- if (isPersisted()) {
- hostEntity = hostDAO.findById(hostEntity.getHostId());
- }
- return hostEntity;
+ return hostDAO.findById(hostId);
}
// Get the cached host state entity or load it fresh through the DAO.
public HostStateEntity getHostStateEntity() {
- if (isPersisted()) {
- hostStateEntity = hostStateDAO.findByHostId(hostEntity.getHostId()) ;
- }
- return hostStateEntity;
+ return hostStateDAO.findByHostId(hostId);
}
-
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
index b3e3941..bba0325 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
@@ -742,7 +742,6 @@ public class TopologyManager {
if (null != rackInfoFromTemplate) {
host.setRackInfo(rackInfoFromTemplate);
- host.persist(); //todo this is required only if host is not persisted to database yet, is it really so?
try {
// todo: do we need this in case of blueprints?
ambariContext.getController().registerRackChange(ambariContext.getClusterName(topology.getClusterId()));
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
index b212e93..ffca51d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
@@ -103,7 +103,6 @@ public class ExecutionCommandWrapperTest {
clusters = injector.getInstance(Clusters.class);
clusters.addHost(HOST1);
- clusters.getHost(HOST1).persist();
clusters.addCluster(CLUSTER1, new StackId("HDP-0.1"));
Cluster cluster1 = clusters.getCluster(CLUSTER1);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
index bf9d0db..1ca777d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
@@ -112,10 +112,7 @@ public class TestActionDBAccessorImpl {
// Add this host's name since it is needed for server-side actions.
clusters.addHost(serverHostName);
- clusters.getHost(serverHostName).persist();
-
clusters.addHost(hostName);
- clusters.getHost(hostName).persist();
StackId stackId = new StackId("HDP-0.1");
clusters.addCluster(clusterName, stackId);
@@ -273,7 +270,6 @@ public class TestActionDBAccessorImpl {
for (int i = 0; i < 1000; i++) {
String hostName = "c64-" + i;
clusters.addHost(hostName);
- clusters.getHost(hostName).persist();
}
// create 1 request, 3 stages per host, each with 2 commands
@@ -460,7 +456,6 @@ public class TestActionDBAccessorImpl {
requestIds.add(requestId);
populateActionDB(db, hostName, requestId, stageId);
clusters.addHost("host2");
- clusters.getHost("host2").persist();
populateActionDB(db, hostName, requestId + 1, stageId);
List<Long> requestIdsResult =
db.getRequestsByStatus(null, BaseRequest.DEFAULT_PAGE_SIZE, false);
@@ -546,11 +541,8 @@ public class TestActionDBAccessorImpl {
s.setStageId(stageId);
clusters.addHost("host2");
- clusters.getHost("host2").persist();
clusters.addHost("host3");
- clusters.getHost("host3").persist();
clusters.addHost("host4");
- clusters.getHost("host4").persist();
s.addHostRoleExecutionCommand("host1", Role.HBASE_MASTER,
RoleCommand.START,
@@ -680,7 +672,6 @@ public class TestActionDBAccessorImpl {
String host = "host" + i;
clusters.addHost(host);
- clusters.getHost(host).persist();
s.addHostRoleExecutionCommand("host" + i, Role.HBASE_MASTER,
RoleCommand.START, null, "cluster1", "HBASE", false, false);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
index fa51f91..da54789 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
@@ -24,21 +24,19 @@ import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertSame;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import junit.framework.Assert;
-
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.Role;
import org.apache.ambari.server.RoleCommand;
import org.apache.ambari.server.agent.ActionQueue;
import org.apache.ambari.server.agent.CommandReport;
import org.apache.ambari.server.audit.AuditLogger;
-import org.apache.ambari.server.controller.HostsMap;
import org.apache.ambari.server.events.publishers.JPAEventPublisher;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
@@ -57,7 +55,8 @@ import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.persist.PersistService;
import com.google.inject.persist.UnitOfWork;
-import static org.junit.Assert.assertNotNull;
+
+import junit.framework.Assert;
public class TestActionManager {
@@ -79,7 +78,6 @@ public class TestActionManager {
stageFactory = injector.getInstance(StageFactory.class);
clusters.addHost(hostname);
- clusters.getHost(hostname).persist();
StackId stackId = new StackId("HDP-0.1");
clusters.addCluster(clusterName, stackId);
unitOfWork = injector.getInstance(UnitOfWork.class);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
index 37a6ae0..674025c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
@@ -68,6 +68,7 @@ import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
import org.apache.ambari.server.state.scheduler.RequestExecutionImpl;
import org.apache.ambari.server.state.stack.OsFamily;
import org.apache.ambari.server.state.svccomphost.ServiceComponentHostImpl;
+import org.apache.ambari.server.topology.PersistedState;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.codehaus.jettison.json.JSONException;
@@ -314,6 +315,7 @@ public class AgentResourceTest extends RandomPortJerseyTest {
bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
bind(HostDAO.class).toInstance(createNiceMock(HostDAO.class));
bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
+ bind(PersistedState.class).toInstance(createNiceMock(PersistedState.class));
}
private void installDependencies() {
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
index 6e3f2e0..dd93374 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
@@ -543,7 +543,6 @@ public class HeartbeatProcessorTest {
public void testCommandReport() throws AmbariException {
injector.injectMembers(this);
clusters.addHost(DummyHostname1);
- clusters.getHost(DummyHostname1).persist();
StackId dummyStackId = new StackId(DummyStackId);
clusters.addCluster(DummyCluster, dummyStackId);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
index e813e66..43503fa 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
@@ -213,7 +213,6 @@ public class HeartbeatTestHelper {
clusters.addHost(hostName);
Host host = clusters.getHost(hostName);
host.setHostAttributes(hostAttributes);
- host.persist();
HostEntity hostEntity = hostDAO.findByName(hostName);
Assert.assertNotNull(hostEntity);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
index d121fcb..503c8e5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
@@ -146,10 +146,8 @@ public class TestHeartbeatMonitor {
Clusters clusters = injector.getInstance(Clusters.class);
clusters.addHost(hostname1);
setOsFamily(clusters.getHost(hostname1), "redhat", "6.3");
- clusters.getHost(hostname1).persist();
clusters.addHost(hostname2);
setOsFamily(clusters.getHost(hostname2), "redhat", "6.3");
- clusters.getHost(hostname2).persist();
clusters.addCluster(clusterName, stackId);
Cluster cluster = clusters.getCluster(clusterName);
helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
@@ -232,10 +230,8 @@ public class TestHeartbeatMonitor {
Clusters clusters = injector.getInstance(Clusters.class);
clusters.addHost(hostname1);
setOsFamily(clusters.getHost(hostname1), "redhat", "6.3");
- clusters.getHost(hostname1).persist();
clusters.addHost(hostname2);
setOsFamily(clusters.getHost(hostname2), "redhat", "6.3");
- clusters.getHost(hostname2).persist();
clusters.addCluster(clusterName, stackId);
Cluster cluster = clusters.getCluster(clusterName);
@@ -360,7 +356,6 @@ public class TestHeartbeatMonitor {
Clusters clusters = injector.getInstance(Clusters.class);
clusters.addHost(hostname1);
setOsFamily(clusters.getHost(hostname1), "redhat", "5.9");
- clusters.getHost(hostname1).persist();
clusters.addCluster(clusterName, stackId);
Cluster cluster = clusters.getCluster(clusterName);
@@ -443,7 +438,6 @@ public class TestHeartbeatMonitor {
Clusters clusters = injector.getInstance(Clusters.class);
clusters.addHost(hostname1);
setOsFamily(clusters.getHost(hostname1), "redhat", "6.3");
- clusters.getHost(hostname1).persist();
clusters.addCluster(clusterName, stackId);
Cluster cluster = clusters.getCluster(clusterName);
@@ -559,11 +553,9 @@ public class TestHeartbeatMonitor {
clusters.addHost(hostname1);
setOsFamily(clusters.getHost(hostname1), "redhat", "6.3");
- clusters.getHost(hostname1).persist();
clusters.addHost(hostname2);
setOsFamily(clusters.getHost(hostname2), "redhat", "6.3");
- clusters.getHost(hostname2).persist();
clusters.addCluster(clusterName, stackId);
Cluster cluster = clusters.getCluster(clusterName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/checks/InstallPackagesCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/InstallPackagesCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/InstallPackagesCheckTest.java
index 080ca3a..c45c59d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/InstallPackagesCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/InstallPackagesCheckTest.java
@@ -18,8 +18,8 @@
package org.apache.ambari.server.checks;
import java.util.ArrayList;
-import java.util.List;
import java.util.Collections;
+import java.util.List;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.configuration.Configuration;
@@ -44,8 +44,6 @@ import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
@@ -147,7 +145,6 @@ public class InstallPackagesCheckTest {
final List<HostVersionEntity> hostVersionEntities = new ArrayList<HostVersionEntity>();
for(String hostName : hostNames) {
Host host = Mockito.mock(Host.class);
- host.setHostName(hostName);
Mockito.when(host.getHostName()).thenReturn(hostName);
Mockito.when(host.getMaintenanceState(1L)).thenReturn(MaintenanceState.OFF);
hosts.add(host);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
index e82900d..5556071 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
@@ -470,7 +470,6 @@ public class AmbariCustomCommandExecutionHelperTest {
clusters.addHost(hostname);
setOsFamily(clusters.getHost(hostname), "redhat", "6.3");
clusters.getHost(hostname).setState(HostState.HEALTHY);
- clusters.getHost(hostname).persist();
if (null != clusterName) {
clusters.mapHostToCluster(hostname, clusterName);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 197b925..a3521fd 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -18,7 +18,6 @@
package org.apache.ambari.server.controller;
-import org.apache.ambari.server.controller.internal.DeleteStatusMetaData;
import static org.easymock.EasyMock.capture;
import static org.easymock.EasyMock.createNiceMock;
import static org.easymock.EasyMock.createStrictMock;
@@ -79,10 +78,10 @@ import org.apache.ambari.server.actionmanager.TargetHostType;
import org.apache.ambari.server.agent.ExecutionCommand;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.audit.AuditLogger;
-import org.apache.ambari.server.audit.AuditLoggerModule;
import org.apache.ambari.server.configuration.Configuration;
import org.apache.ambari.server.controller.internal.ClusterStackVersionResourceProviderTest;
import org.apache.ambari.server.controller.internal.ComponentResourceProviderTest;
+import org.apache.ambari.server.controller.internal.DeleteStatusMetaData;
import org.apache.ambari.server.controller.internal.HostComponentResourceProviderTest;
import org.apache.ambari.server.controller.internal.HostResourceProviderTest;
import org.apache.ambari.server.controller.internal.RequestOperationLevel;
@@ -171,7 +170,6 @@ import org.springframework.security.core.context.SecurityContextHolder;
import com.google.common.collect.Lists;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
-import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.persist.PersistService;
@@ -313,7 +311,6 @@ public class AmbariManagementControllerTest {
clusters.addHost(hostname);
setOsFamily(clusters.getHost(hostname), "redhat", "6.3");
clusters.getHost(hostname).setState(HostState.HEALTHY);
- clusters.getHost(hostname).persist();
}
if (null != clusterName) {
@@ -628,8 +625,6 @@ public class AmbariManagementControllerTest {
clusters.addHost(host2);
setOsFamily(clusters.getHost(host1), "redhat", "6.3");
setOsFamily(clusters.getHost(host2), "redhat", "6.3");
- clusters.getHost(host1).persist();
- clusters.getHost(host2).persist();
controller.createCluster(r);
Assert.assertNotNull(clusters.getCluster(cluster1));
@@ -1674,19 +1669,16 @@ public class AmbariManagementControllerTest {
h1.setIPv4("ipv41");
h1.setIPv6("ipv61");
setOsFamily(h1, "redhat", "6.3");
- h1.persist();
clusters.addHost(host2);
Host h2 = clusters.getHost(host2);
h2.setIPv4("ipv42");
h2.setIPv6("ipv62");
setOsFamily(h2, "redhat", "6.3");
- h2.persist();
clusters.addHost(host3);
Host h3 = clusters.getHost(host3);
h3.setIPv4("ipv43");
h3.setIPv6("ipv63");
setOsFamily(h3, "redhat", "6.3");
- h3.persist();
try {
set1.clear();
@@ -1808,8 +1800,6 @@ public class AmbariManagementControllerTest {
clusters.addHost(host2);
setOsFamily(clusters.getHost(host1), "redhat", "5.9");
setOsFamily(clusters.getHost(host2), "redhat", "5.9");
- clusters.getHost(host1).persist();
- clusters.getHost(host2).persist();
HostRequest request = new HostRequest(host2, "foo", new HashMap<String, String>());
requests.add(request);
@@ -1864,9 +1854,6 @@ public class AmbariManagementControllerTest {
setOsFamily(clusters.getHost(host1), "redhat", "5.9");
setOsFamily(clusters.getHost(host2), "redhat", "5.9");
setOsFamily(clusters.getHost(host3), "redhat", "5.9");
- clusters.getHost(host1).persist();
- clusters.getHost(host2).persist();
- clusters.getHost(host3).persist();
HostRequest r1 = new HostRequest(host1, cluster1, null);
HostRequest r2 = new HostRequest(host2, cluster1, null);
@@ -3136,7 +3123,6 @@ public class AmbariManagementControllerTest {
"centos5");
clusters.addHost(host4);
setOsFamily(clusters.getHost(host4), "redhat", "5.9");
- clusters.getHost(host4).persist();
Map<String, String> attrs = new HashMap<String, String>();
attrs.put("a1", "b1");
@@ -8377,7 +8363,6 @@ public class AmbariManagementControllerTest {
RepositoryVersionState.INSTALLING);
clusters.addHost(hostName1);
setOsFamily(clusters.getHost(hostName1), "redhat", "5.9");
- clusters.getHost(hostName1).persist();
clusters.mapHostsToCluster(new HashSet<String>(){
{add(hostName1);}}, cluster1);
@@ -9353,13 +9338,10 @@ public class AmbariManagementControllerTest {
clusters.addHost("host3");
Host host = clusters.getHost("host1");
setOsFamily(host, "redhat", "6.3");
- host.persist();
host = clusters.getHost("host2");
setOsFamily(host, "redhat", "6.3");
- host.persist();
host = clusters.getHost("host3");
setOsFamily(host, "redhat", "6.3");
- host.persist();
ClusterRequest clusterRequest = new ClusterRequest(null, cluster1, "HDP-1.2.0", null);
amc.createCluster(clusterRequest);
@@ -9402,12 +9384,10 @@ public class AmbariManagementControllerTest {
clusters.addHost(HOST1);
Host host = clusters.getHost(HOST1);
setOsFamily(host, "redhat", "6.3");
- host.persist();
clusters.addHost(HOST2);
host = clusters.getHost(HOST2);
setOsFamily(host, "redhat", "6.3");
- host.persist();
AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
@@ -9497,13 +9477,10 @@ public class AmbariManagementControllerTest {
clusters.addHost(host3);
Host host = clusters.getHost("host1");
setOsFamily(host, "redhat", "5.9");
- host.persist();
host = clusters.getHost("host2");
setOsFamily(host, "redhat", "5.9");
- host.persist();
host = clusters.getHost("host3");
setOsFamily(host, "redhat", "5.9");
- host.persist();
ClusterRequest clusterRequest = new ClusterRequest(null, cluster1, "HDP-1.2.0", null);
amc.createCluster(clusterRequest);
@@ -9802,7 +9779,6 @@ public class AmbariManagementControllerTest {
clusters.addHost(HOST1);
Host host = clusters.getHost(HOST1);
setOsFamily(host, "redhat", "5.9");
- host.persist();
ClusterRequest clusterRequest = new ClusterRequest(null, CLUSTER_NAME, STACK_ID, null);
amc.createCluster(clusterRequest);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
index 73b5b75..58f597f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
@@ -178,7 +178,6 @@ public class BackgroundCustomCommandExecutionTest {
clusters.addHost(hostname);
setOsFamily(clusters.getHost(hostname), "redhat", "6.3");
clusters.getHost(hostname).setState(HostState.HEALTHY);
- clusters.getHost(hostname).persist();
if (null != clusterName) {
clusters.mapHostToCluster(hostname, clusterName);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java
index 2512bb3..a54fb98 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java
@@ -24,8 +24,6 @@ import java.util.HashSet;
import java.util.Map;
import java.util.Set;
-import junit.framework.Assert;
-
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.controller.internal.ComponentResourceProviderTest;
import org.apache.ambari.server.controller.internal.ServiceResourceProviderTest;
@@ -46,11 +44,13 @@ import org.apache.ambari.server.state.State;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.springframework.security.core.context.SecurityContextHolder;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.persist.PersistService;
-import org.springframework.security.core.context.SecurityContextHolder;
+
+import junit.framework.Assert;
@SuppressWarnings("serial")
public class RefreshYarnCapacitySchedulerReleaseConfigTest {
@@ -177,7 +177,6 @@ public class RefreshYarnCapacitySchedulerReleaseConfigTest {
clusters.addHost(hostname);
setOsFamily(clusters.getHost(hostname), "redhat", "6.3");
clusters.getHost(hostname).setState(HostState.HEALTHY);
- clusters.getHost(hostname).persist();
if (null != clusterName) {
clusters.mapHostToCluster(hostname, clusterName);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java
index 9f610f9..c5eb76e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java
@@ -170,14 +170,12 @@ public class JMXHostProviderTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "5.9");
clusters.getHost("h1").setHostAttributes(hostAttributes);
- clusters.getHost("h1").persist();
String host2 = "h2";
clusters.addHost(host2);
hostAttributes = new HashMap<String, String>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6.3");
clusters.getHost("h2").setHostAttributes(hostAttributes);
- clusters.getHost("h2").persist();
clusters.mapHostToCluster(host1, clusterName);
clusters.mapHostToCluster(host2, clusterName);
@@ -262,14 +260,12 @@ public class JMXHostProviderTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "5.9");
clusters.getHost("h1").setHostAttributes(hostAttributes);
- clusters.getHost("h1").persist();
String host2 = "h2";
clusters.addHost(host2);
hostAttributes = new HashMap<String, String>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6.3");
clusters.getHost("h2").setHostAttributes(hostAttributes);
- clusters.getHost("h2").persist();
clusters.mapHostToCluster(host1, clusterName);
clusters.mapHostToCluster(host2, clusterName);
@@ -365,14 +361,12 @@ public class JMXHostProviderTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "5.9");
clusters.getHost("h1").setHostAttributes(hostAttributes);
- clusters.getHost("h1").persist();
String host2 = "h2";
clusters.addHost(host2);
hostAttributes = new HashMap<String, String>();
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6.3");
clusters.getHost("h2").setHostAttributes(hostAttributes);
- clusters.getHost("h2").persist();
clusters.mapHostToCluster(host1, clusterName);
clusters.mapHostToCluster(host2, clusterName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProviderTest.java
index 2721731..958957b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProviderTest.java
@@ -146,7 +146,6 @@ public class StackDefinedPropertyProviderTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6.3");
host.setHostAttributes(hostAttributes);
- host.persist();
clusters.mapHostToCluster("h1", "c2");
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
index 139702c..a0940ba 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
@@ -174,7 +174,6 @@ public class UpgradeResourceProviderHDP22Test {
hostAttributes.put("os_release_version", "6.3");
host.setHostAttributes(hostAttributes);
host.setState(HostState.HEALTHY);
- host.persist();
clusters.mapHostToCluster("h1", "c1");
http://git-wip-us.apache.org/repos/asf/ambari/blob/38700445/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
index 63892cf..c2c8ced 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
@@ -232,7 +232,6 @@ public class UpgradeResourceProviderTest {
hostAttributes.put("os_release_version", "6.3");
host.setHostAttributes(hostAttributes);
host.setState(HostState.HEALTHY);
- host.persist();
clusters.mapHostToCluster("h1", "c1");
@@ -590,7 +589,6 @@ public class UpgradeResourceProviderTest {
hostAttributes.put("os_family", "redhat");
hostAttributes.put("os_release_version", "6.3");
host.setHostAttributes(hostAttributes);
- host.persist();
clusters.mapHostToCluster("h2", "c1");
Cluster cluster = clusters.getCluster("c1");
[14/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/197a37f0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/197a37f0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/197a37f0
Branch: refs/heads/trunk
Commit: 197a37f034b2bc10f483d558cde7ab0f874c8d4c
Parents: 78a875c f6124a0
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Oct 7 14:21:34 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Oct 7 14:21:34 2016 -0400
----------------------------------------------------------------------
ambari-agent/conf/unix/ambari-agent | 17 ++
.../TestDataStructureUtils.py | 47 ++++
.../libraries/functions/data_structure_utils.py | 57 ++++
.../server/state/DependencyConditionInfo.java | 102 +++++++
.../ambari/server/state/DependencyInfo.java | 36 ++-
.../server/topology/BlueprintValidatorImpl.java | 13 +
.../common-services/HDFS/2.1.0.2.0/metainfo.xml | 44 +++
.../4.0.0.2.0/package/scripts/oozie_service.py | 5 +
.../stacks/HDP/2.0.6/services/stack_advisor.py | 269 ++++++++++++-------
.../stacks/HDP/2.3/services/stack_advisor.py | 13 +-
.../topology/BlueprintValidatorImplTest.java | 75 +++++-
.../stacks/2.0.6/OOZIE/test_oozie_server.py | 8 +
.../stacks/2.0.6/common/test_stack_advisor.py | 215 +++++++++++++++
.../MICROSOFT_R/8.0.0/kerberos.json | 15 --
.../MICROSOFT_R/8.0.0/metainfo.xml | 53 ----
.../package/files/microsoft_r_serviceCheck.r | 28 --
.../8.0.0/package/scripts/microsoft_r.py | 47 ----
.../MICROSOFT_R/8.0.0/package/scripts/params.py | 31 ---
.../8.0.0/package/scripts/params_linux.py | 79 ------
.../8.0.0/package/scripts/params_windows.py | 26 --
.../8.0.0/package/scripts/service_check.py | 89 ------
.../MICROSOFT_R/8.0.0/role_command_order.json | 6 -
.../MICROSOFT_R/8.0.0/service_advisor.py | 73 -----
.../MICROSOFT_R/8.0.5/kerberos.json | 15 ++
.../MICROSOFT_R/8.0.5/metainfo.xml | 53 ++++
.../package/files/microsoft_r_serviceCheck.r | 28 ++
.../8.0.5/package/scripts/microsoft_r.py | 47 ++++
.../MICROSOFT_R/8.0.5/package/scripts/params.py | 31 +++
.../8.0.5/package/scripts/params_linux.py | 79 ++++++
.../8.0.5/package/scripts/params_windows.py | 26 ++
.../8.0.5/package/scripts/service_check.py | 89 ++++++
.../MICROSOFT_R/8.0.5/role_command_order.json | 6 +
.../MICROSOFT_R/8.0.5/service_advisor.py | 73 +++++
.../MICROSOFT_R/8.0.0/metainfo.xml | 28 --
.../MICROSOFT_R/8.0.0/repos/repoinfo.xml | 33 ---
.../MICROSOFT_R/8.0.5/metainfo.xml | 28 ++
.../MICROSOFT_R/8.0.5/repos/repoinfo.xml | 33 +++
.../src/main/resources/mpack.json | 2 +-
.../ui/hueambarimigration-view/package.json | 2 +-
.../src/main/resources/ui/package.json | 2 +-
40 files changed, 1304 insertions(+), 619 deletions(-)
----------------------------------------------------------------------
[06/32] ambari git commit: AMBARI-18495 - Remove Unnecessary Locks
Inside Of Cluster Business Object Implementations (jonathanhurley)
Posted by jo...@apache.org.
AMBARI-18495 - Remove Unnecessary Locks Inside Of Cluster Business Object Implementations (jonathanhurley)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d53c9e2e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d53c9e2e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d53c9e2e
Branch: refs/heads/trunk
Commit: d53c9e2ee99c046ae5b37b531419549397f4aea1
Parents: 817aed4
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Thu Sep 29 15:20:53 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Mon Oct 3 13:03:59 2016 -0400
----------------------------------------------------------------------
ambari-project/pom.xml | 2 +-
.../ambari/server/controller/AmbariServer.java | 11 +-
.../internal/UpgradeResourceProvider.java | 2 +-
.../org/apache/ambari/server/state/Cluster.java | 5 -
.../apache/ambari/server/state/Clusters.java | 9 +
.../server/state/cluster/ClusterImpl.java | 1348 +++++++-----------
.../server/state/cluster/ClustersImpl.java | 629 ++++----
.../apache/ambari/server/utils/RetryHelper.java | 18 +-
.../ambari/server/agent/AgentResourceTest.java | 10 +-
.../server/agent/HeartbeatProcessorTest.java | 11 +-
.../server/agent/HeartbeatTestHelper.java | 15 +-
.../server/agent/TestHeartbeatHandler.java | 30 +-
.../server/api/services/ClusterServiceTest.java | 43 +-
.../configuration/RecoveryConfigHelperTest.java | 87 +-
.../server/controller/KerberosHelperTest.java | 78 +-
.../internal/ClusterResourceProviderTest.java | 7 +-
.../apache/ambari/server/orm/OrmTestHelper.java | 21 +-
.../server/orm/dao/ClusterVersionDAOTest.java | 12 +-
.../server/orm/dao/ConfigGroupDAOTest.java | 33 +-
.../orm/dao/RepositoryVersionDAOTest.java | 6 +-
.../ambari/server/orm/dao/SettingDAOTest.java | 19 +-
.../ambari/server/orm/dao/WidgetDAOTest.java | 8 +-
.../server/orm/dao/WidgetLayoutDAOTest.java | 13 +-
.../ambari/server/state/ConfigGroupTest.java | 8 +-
.../ambari/server/state/ConfigHelperTest.java | 9 +-
.../server/state/RequestExecutionTest.java | 9 +-
.../state/cluster/ClusterDeadlockTest.java | 35 +-
.../cluster/ClusterEffectiveVersionTest.java | 19 +-
.../server/state/cluster/ClusterTest.java | 34 +-
.../ambari/server/utils/StageUtilsTest.java | 3 +-
30 files changed, 1058 insertions(+), 1476 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-project/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-project/pom.xml b/ambari-project/pom.xml
index 4f045fe..3ef3086 100644
--- a/ambari-project/pom.xml
+++ b/ambari-project/pom.xml
@@ -114,7 +114,7 @@
<dependency>
<groupId>org.apache.derby</groupId>
<artifactId>derby</artifactId>
- <version>10.9.1.0</version>
+ <version>10.12.1.1</version>
</dependency>
<dependency>
<groupId>org.springframework.security</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index 5e498f0..a3c61df 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@ -25,17 +25,16 @@ import java.net.Authenticator;
import java.net.BindException;
import java.net.PasswordAuthentication;
import java.net.URL;
-import java.util.ArrayList;
import java.util.EnumSet;
import java.util.Enumeration;
-import java.util.List;
import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
import java.util.logging.LogManager;
import javax.crypto.BadPaddingException;
import javax.servlet.DispatcherType;
-import com.google.common.base.Joiner;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.StateRecoveryManager;
import org.apache.ambari.server.StaticallyInject;
@@ -149,6 +148,7 @@ import org.springframework.web.context.request.RequestContextListener;
import org.springframework.web.context.support.GenericWebApplicationContext;
import org.springframework.web.filter.DelegatingFilterProxy;
+import com.google.common.base.Joiner;
import com.google.common.util.concurrent.ServiceManager;
import com.google.gson.Gson;
import com.google.inject.Guice;
@@ -160,9 +160,6 @@ import com.google.inject.name.Named;
import com.google.inject.persist.Transactional;
import com.sun.jersey.spi.container.servlet.ServletContainer;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
@Singleton
public class AmbariServer {
@@ -880,7 +877,7 @@ public class AmbariServer {
BaseService.init(injector.getInstance(RequestAuditLogger.class));
- RetryHelper.init(configs.getOperationsRetryAttempts());
+ RetryHelper.init(injector.getInstance(Clusters.class), configs.getOperationsRetryAttempts());
}
/**
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index d37e32b..b3d23d9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -360,7 +360,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
// rollback
// As we operate inside with cluster data, any cache which belongs to
// cluster need to be flushed
- cluster.invalidateData();
+ clusters.get().invalidate(cluster);
throw e;
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index d141df8..b1958ef 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -663,11 +663,6 @@ public interface Cluster {
void removeConfigurations(StackId stackId);
/**
- * Clear cluster caches and re-read data from database
- */
- void invalidateData();
-
- /**
* Returns whether this cluster was provisioned by a Blueprint or not.
* @return true if the cluster was deployed with a Blueprint otherwise false.
*/
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/main/java/org/apache/ambari/server/state/Clusters.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Clusters.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Clusters.java
index 2d859b3..e2164c0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Clusters.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Clusters.java
@@ -278,4 +278,13 @@ public interface Clusters {
*/
int getClusterSize(String clusterName);
+ /**
+ * Invalidates the specified cluster by retrieving it from the database and
+ * refreshing all of the internal stateful collections.
+ *
+ * @param cluster
+ * the cluster to invalidate and refresh (not {@code null}).
+ */
+ void invalidate(Cluster cluster);
+
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index a6f0a3b..848036d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -30,8 +30,9 @@ import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
-import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
@@ -173,34 +174,32 @@ public class ClusterImpl implements Cluster {
private StackId desiredStackVersion;
- private volatile boolean desiredStackVersionSet = true;
-
- private volatile Map<String, Service> services = null;
+ private final ConcurrentSkipListMap<String, Service> services = new ConcurrentSkipListMap<>();
/**
* [ Config Type -> [ Config Version Tag -> Config ] ]
*/
- private volatile Map<String, Map<String, Config>> allConfigs;
+ private final ConcurrentMap<String, ConcurrentMap<String, Config>> allConfigs = new ConcurrentHashMap<>();
/**
* [ ServiceName -> [ ServiceComponentName -> [ HostName -> [ ... ] ] ] ]
*/
- private Map<String, Map<String, Map<String, ServiceComponentHost>>> serviceComponentHosts;
+ private final ConcurrentMap<String, ConcurrentMap<String, ConcurrentMap<String, ServiceComponentHost>>> serviceComponentHosts = new ConcurrentHashMap<>();
/**
* [ HostName -> [ ... ] ]
*/
- private Map<String, List<ServiceComponentHost>> serviceComponentHostsByHost;
+ private final ConcurrentMap<String, List<ServiceComponentHost>> serviceComponentHostsByHost = new ConcurrentHashMap<>();
/**
* Map of existing config groups
*/
- private volatile Map<Long, ConfigGroup> clusterConfigGroups;
+ private final Map<Long, ConfigGroup> clusterConfigGroups = new ConcurrentHashMap<>();
/**
* Map of Request schedules for this cluster
*/
- private volatile Map<Long, RequestExecution> requestExecutions;
+ private final Map<Long, RequestExecution> requestExecutions = new ConcurrentHashMap<>();
private final ReadWriteLock clusterGlobalLock = new ReentrantReadWriteLock();
@@ -287,8 +286,6 @@ public class ClusterImpl implements Cluster {
@Inject
private StackDAO stackDAO;
- private volatile boolean svcHostsLoaded = false;
-
private volatile Multimap<String, String> serviceConfigTypes;
/**
@@ -311,20 +308,21 @@ public class ClusterImpl implements Cluster {
private Map<String, String> m_clusterPropertyCache = new ConcurrentHashMap<>();
@Inject
- public ClusterImpl(@Assisted ClusterEntity clusterEntity,
- Injector injector, AmbariEventPublisher eventPublisher) throws AmbariException {
- injector.injectMembers(this);
+ public ClusterImpl(@Assisted ClusterEntity clusterEntity, Injector injector,
+ AmbariEventPublisher eventPublisher)
+ throws AmbariException {
clusterId = clusterEntity.getClusterId();
clusterName = clusterEntity.getClusterName();
- serviceComponentHosts = new HashMap<>();
-
- serviceComponentHostsByHost = new HashMap<>();
-
- desiredStackVersion = new StackId(clusterEntity.getDesiredStack());
+ injector.injectMembers(this);
+ loadStackVersion();
+ loadServices();
+ loadServiceHostComponents();
+ loadConfigGroups();
cacheConfigurations();
+ loadRequestExecutions();
if (desiredStackVersion != null && !StringUtils.isEmpty(desiredStackVersion.getStackName()) && !
StringUtils.isEmpty(desiredStackVersion.getStackVersion())) {
@@ -376,301 +374,195 @@ public class ClusterImpl implements Cluster {
* Make sure we load all the service host components.
* We need this for live status checks.
*/
- public void loadServiceHostComponents() {
- loadServices();
- if (svcHostsLoaded) {
- return;
- }
-
- clusterGlobalLock.writeLock().lock();
-
- try {
- LOG.info("Loading Service Host Components");
- if (svcHostsLoaded) {
- return;
- }
- if (services != null) {
- for (Entry<String, Service> serviceKV : services.entrySet()) {
- /* get all the service component hosts **/
- Service service = serviceKV.getValue();
- if (!serviceComponentHosts.containsKey(service.getName())) {
- serviceComponentHosts.put(service.getName(),
- new HashMap<String, Map<String, ServiceComponentHost>>());
+ private void loadServiceHostComponents() {
+ for (Entry<String, Service> serviceKV : services.entrySet()) {
+ /* get all the service component hosts **/
+ Service service = serviceKV.getValue();
+ if (!serviceComponentHosts.containsKey(service.getName())) {
+ serviceComponentHosts.put(service.getName(),
+ new ConcurrentHashMap<String, ConcurrentMap<String, ServiceComponentHost>>());
+ }
+
+ for (Entry<String, ServiceComponent> svcComponent : service.getServiceComponents().entrySet()) {
+ ServiceComponent comp = svcComponent.getValue();
+ String componentName = svcComponent.getKey();
+ if (!serviceComponentHosts.get(service.getName()).containsKey(componentName)) {
+ serviceComponentHosts.get(service.getName()).put(componentName,
+ new ConcurrentHashMap<String, ServiceComponentHost>());
+ }
+
+ /** Get Service Host Components **/
+ for (Entry<String, ServiceComponentHost> svchost : comp.getServiceComponentHosts().entrySet()) {
+ String hostname = svchost.getKey();
+ ServiceComponentHost svcHostComponent = svchost.getValue();
+ if (!serviceComponentHostsByHost.containsKey(hostname)) {
+ serviceComponentHostsByHost.put(hostname, new ArrayList<ServiceComponentHost>());
}
- for (Entry<String, ServiceComponent> svcComponent : service.getServiceComponents().entrySet()) {
- ServiceComponent comp = svcComponent.getValue();
- String componentName = svcComponent.getKey();
- if (!serviceComponentHosts.get(service.getName()).containsKey(
- componentName)) {
- serviceComponentHosts.get(service.getName()).put(componentName,
- new HashMap<String, ServiceComponentHost>());
- }
- /** Get Service Host Components **/
- for (Entry<String, ServiceComponentHost> svchost : comp.getServiceComponentHosts().entrySet()) {
- String hostname = svchost.getKey();
- ServiceComponentHost svcHostComponent = svchost.getValue();
- if (!serviceComponentHostsByHost.containsKey(hostname)) {
- serviceComponentHostsByHost.put(hostname,
- new ArrayList<ServiceComponentHost>());
- }
- List<ServiceComponentHost> compList = serviceComponentHostsByHost.get(hostname);
- compList.add(svcHostComponent);
- if (!serviceComponentHosts.get(service.getName()).get(
- componentName).containsKey(hostname)) {
- serviceComponentHosts.get(service.getName()).get(componentName).put(
- hostname, svcHostComponent);
- }
- }
+ List<ServiceComponentHost> compList = serviceComponentHostsByHost.get(hostname);
+ compList.add(svcHostComponent);
+
+ if (!serviceComponentHosts.get(service.getName()).get(componentName).containsKey(
+ hostname)) {
+ serviceComponentHosts.get(service.getName()).get(componentName).put(hostname,
+ svcHostComponent);
}
}
}
- svcHostsLoaded = true;
- } finally {
- clusterGlobalLock.writeLock().unlock();
}
}
private void loadServices() {
- if (services == null) {
- clusterGlobalLock.writeLock().lock();
+ ClusterEntity clusterEntity = getClusterEntity();
+ if (clusterEntity.getClusterServiceEntities().isEmpty()) {
+ return;
+ }
+ for (ClusterServiceEntity serviceEntity : clusterEntity.getClusterServiceEntities()) {
+ StackId stackId = getCurrentStackVersion();
try {
- if (services == null) {
- ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- services = new TreeMap<String, Service>();
-
- if (!clusterEntity.getClusterServiceEntities().isEmpty()) {
- for (ClusterServiceEntity serviceEntity : clusterEntity.getClusterServiceEntities()) {
- StackId stackId = getCurrentStackVersion();
- try {
- if (ambariMetaInfo.getService(stackId.getStackName(),
- stackId.getStackVersion(), serviceEntity.getServiceName()) != null) {
- services.put(serviceEntity.getServiceName(),
- serviceFactory.createExisting(this, serviceEntity));
- }
- } catch (AmbariException e) {
- LOG.error(String.format(
- "Can not get service info: stackName=%s, stackVersion=%s, serviceName=%s",
- stackId.getStackName(), stackId.getStackVersion(),
- serviceEntity.getServiceName()));
- }
- }
- }
- }
+ if (ambariMetaInfo.getService(stackId.getStackName(),
+ stackId.getStackVersion(), serviceEntity.getServiceName()) != null) {
+ services.put(serviceEntity.getServiceName(),
+ serviceFactory.createExisting(this, serviceEntity));
}
- } finally {
- clusterGlobalLock.writeLock().unlock();
+ } catch (AmbariException e) {
+ LOG.error(String.format(
+ "Can not get service info: stackName=%s, stackVersion=%s, serviceName=%s",
+ stackId.getStackName(), stackId.getStackVersion(),
+ serviceEntity.getServiceName()));
}
}
}
private void loadConfigGroups() {
- if (clusterConfigGroups == null) {
- clusterGlobalLock.writeLock().lock();
-
- try {
- if (clusterConfigGroups == null) {
- ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- clusterConfigGroups = new HashMap<Long, ConfigGroup>();
- if (!clusterEntity.getConfigGroupEntities().isEmpty()) {
- for (ConfigGroupEntity configGroupEntity : clusterEntity.getConfigGroupEntities()) {
- clusterConfigGroups.put(configGroupEntity.getGroupId(),
- configGroupFactory.createExisting(this, configGroupEntity));
- }
- }
- }
- }
- } finally {
- clusterGlobalLock.writeLock().unlock();
+ ClusterEntity clusterEntity = getClusterEntity();
+ if (!clusterEntity.getConfigGroupEntities().isEmpty()) {
+ for (ConfigGroupEntity configGroupEntity : clusterEntity.getConfigGroupEntities()) {
+ clusterConfigGroups.put(configGroupEntity.getGroupId(),
+ configGroupFactory.createExisting(this, configGroupEntity));
}
}
}
private void loadRequestExecutions() {
- if (requestExecutions == null) {
- clusterGlobalLock.writeLock().lock();
- try {
- if (requestExecutions == null) {
- ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- requestExecutions = new HashMap<Long, RequestExecution>();
- if (!clusterEntity.getRequestScheduleEntities().isEmpty()) {
- for (RequestScheduleEntity scheduleEntity : clusterEntity.getRequestScheduleEntities()) {
- requestExecutions.put(scheduleEntity.getScheduleId(),
- requestExecutionFactory.createExisting(this, scheduleEntity));
- }
- }
- }
- }
- } finally {
- clusterGlobalLock.writeLock().unlock();
+ ClusterEntity clusterEntity = getClusterEntity();
+ if (!clusterEntity.getRequestScheduleEntities().isEmpty()) {
+ for (RequestScheduleEntity scheduleEntity : clusterEntity.getRequestScheduleEntities()) {
+ requestExecutions.put(scheduleEntity.getScheduleId(),
+ requestExecutionFactory.createExisting(this, scheduleEntity));
}
}
}
@Override
public void addConfigGroup(ConfigGroup configGroup) throws AmbariException {
- loadConfigGroups();
- clusterGlobalLock.writeLock().lock();
- try {
- String hostList = "";
- if(LOG.isDebugEnabled()) {
- if (configGroup.getHosts() != null) {
- for (Host host : configGroup.getHosts().values()) {
- hostList += host.getHostName() + ", ";
- }
+ String hostList = "";
+ if(LOG.isDebugEnabled()) {
+ if (configGroup.getHosts() != null) {
+ for (Host host : configGroup.getHosts().values()) {
+ hostList += host.getHostName() + ", ";
}
}
+ }
- LOG.debug("Adding a new Config group" + ", clusterName = "
- + getClusterName() + ", groupName = " + configGroup.getName()
- + ", tag = " + configGroup.getTag() + " with hosts " + hostList);
-
- if (clusterConfigGroups.containsKey(configGroup.getId())) {
- // The loadConfigGroups will load all groups to memory
- LOG.debug("Config group already exists" + ", clusterName = "
- + getClusterName() + ", groupName = " + configGroup.getName()
- + ", groupId = " + configGroup.getId() + ", tag = "
- + configGroup.getTag());
- } else {
- clusterConfigGroups.put(configGroup.getId(), configGroup);
- }
+ LOG.debug("Adding a new Config group" + ", clusterName = "
+ + getClusterName() + ", groupName = " + configGroup.getName()
+ + ", tag = " + configGroup.getTag() + " with hosts " + hostList);
- } finally {
- clusterGlobalLock.writeLock().unlock();
+ if (clusterConfigGroups.containsKey(configGroup.getId())) {
+ // The loadConfigGroups will load all groups to memory
+ LOG.debug("Config group already exists" + ", clusterName = "
+ + getClusterName() + ", groupName = " + configGroup.getName()
+ + ", groupId = " + configGroup.getId() + ", tag = "
+ + configGroup.getTag());
+ } else {
+ clusterConfigGroups.put(configGroup.getId(), configGroup);
}
}
@Override
public Map<Long, ConfigGroup> getConfigGroups() {
- loadConfigGroups();
- clusterGlobalLock.readLock().lock();
- try {
- return Collections.unmodifiableMap(clusterConfigGroups);
- } finally {
- clusterGlobalLock.readLock().unlock();
- }
+ return Collections.unmodifiableMap(clusterConfigGroups);
}
@Override
public Map<Long, ConfigGroup> getConfigGroupsByHostname(String hostname)
throws AmbariException {
- loadConfigGroups();
Map<Long, ConfigGroup> configGroups = new HashMap<Long, ConfigGroup>();
- clusterGlobalLock.readLock().lock();
- try {
- for (Entry<Long, ConfigGroup> groupEntry : clusterConfigGroups.entrySet()) {
- Long id = groupEntry.getKey();
- ConfigGroup group = groupEntry.getValue();
- for (Host host : group.getHosts().values()) {
- if (StringUtils.equals(hostname, host.getHostName())) {
- configGroups.put(id, group);
- break;
- }
+ for (Entry<Long, ConfigGroup> groupEntry : clusterConfigGroups.entrySet()) {
+ Long id = groupEntry.getKey();
+ ConfigGroup group = groupEntry.getValue();
+ for (Host host : group.getHosts().values()) {
+ if (StringUtils.equals(hostname, host.getHostName())) {
+ configGroups.put(id, group);
+ break;
}
}
- } finally {
- clusterGlobalLock.readLock().unlock();
}
return configGroups;
}
@Override
public void addRequestExecution(RequestExecution requestExecution) throws AmbariException {
- loadRequestExecutions();
- clusterGlobalLock.writeLock().lock();
- try {
- LOG.info("Adding a new request schedule" + ", clusterName = "
- + getClusterName() + ", id = " + requestExecution.getId()
- + ", description = " + requestExecution.getDescription());
-
- if (requestExecutions.containsKey(requestExecution.getId())) {
- LOG.debug("Request schedule already exists" + ", clusterName = "
- + getClusterName() + ", id = " + requestExecution.getId()
- + ", description = " + requestExecution.getDescription());
- } else {
- requestExecutions.put(requestExecution.getId(), requestExecution);
- }
- } finally {
- clusterGlobalLock.writeLock().unlock();
+ LOG.info("Adding a new request schedule" + ", clusterName = " + getClusterName() + ", id = "
+ + requestExecution.getId() + ", description = " + requestExecution.getDescription());
+
+ if (requestExecutions.containsKey(requestExecution.getId())) {
+ LOG.debug(
+ "Request schedule already exists" + ", clusterName = " + getClusterName() + ", id = "
+ + requestExecution.getId() + ", description = " + requestExecution.getDescription());
+ } else {
+ requestExecutions.put(requestExecution.getId(), requestExecution);
}
}
@Override
public Map<Long, RequestExecution> getAllRequestExecutions() {
- loadRequestExecutions();
- clusterGlobalLock.readLock().lock();
- try {
- return Collections.unmodifiableMap(requestExecutions);
- } finally {
- clusterGlobalLock.readLock().unlock();
- }
+ return Collections.unmodifiableMap(requestExecutions);
}
@Override
public void deleteRequestExecution(Long id) throws AmbariException {
- loadRequestExecutions();
- clusterGlobalLock.writeLock().lock();
- try {
- RequestExecution requestExecution = requestExecutions.get(id);
- if (requestExecution == null) {
- throw new AmbariException("Request schedule does not exists, "
- + "id = " + id);
- }
- LOG.info("Deleting request schedule" + ", clusterName = "
- + getClusterName() + ", id = " + requestExecution.getId()
- + ", description = " + requestExecution.getDescription());
-
- requestExecution.delete();
- requestExecutions.remove(id);
- } finally {
- clusterGlobalLock.writeLock().unlock();
+ RequestExecution requestExecution = requestExecutions.get(id);
+ if (requestExecution == null) {
+ throw new AmbariException("Request schedule does not exists, " + "id = " + id);
}
+ LOG.info("Deleting request schedule" + ", clusterName = " + getClusterName() + ", id = "
+ + requestExecution.getId() + ", description = " + requestExecution.getDescription());
+
+ requestExecution.delete();
+ requestExecutions.remove(id);
}
@Override
public void deleteConfigGroup(Long id) throws AmbariException, AuthorizationException {
- loadConfigGroups();
- clusterGlobalLock.writeLock().lock();
- try {
- ConfigGroup configGroup = clusterConfigGroups.get(id);
- if (configGroup == null) {
- throw new ConfigGroupNotFoundException(getClusterName(), id.toString());
- }
+ ConfigGroup configGroup = clusterConfigGroups.get(id);
+ if (configGroup == null) {
+ throw new ConfigGroupNotFoundException(getClusterName(), id.toString());
+ }
- LOG.debug("Deleting Config group" + ", clusterName = " + getClusterName()
- + ", groupName = " + configGroup.getName() + ", groupId = "
- + configGroup.getId() + ", tag = " + configGroup.getTag());
+ LOG.debug("Deleting Config group" + ", clusterName = " + getClusterName()
+ + ", groupName = " + configGroup.getName() + ", groupId = "
+ + configGroup.getId() + ", tag = " + configGroup.getTag());
- configGroup.delete();
- clusterConfigGroups.remove(id);
- } finally {
- clusterGlobalLock.writeLock().unlock();
- }
+ configGroup.delete();
+ clusterConfigGroups.remove(id);
}
public ServiceComponentHost getServiceComponentHost(String serviceName,
String serviceComponentName, String hostname) throws AmbariException {
- loadServiceHostComponents();
- clusterGlobalLock.readLock().lock();
- try {
- if (!serviceComponentHosts.containsKey(serviceName)
- || !serviceComponentHosts.get(serviceName).containsKey(
- serviceComponentName)
- || !serviceComponentHosts.get(serviceName).get(serviceComponentName).containsKey(
- hostname)) {
- throw new ServiceComponentHostNotFoundException(getClusterName(),
- serviceName, serviceComponentName, hostname);
- }
- return serviceComponentHosts.get(serviceName).get(serviceComponentName).get(
- hostname);
- } finally {
- clusterGlobalLock.readLock().unlock();
+ if (!serviceComponentHosts.containsKey(serviceName)
+ || !serviceComponentHosts.get(serviceName).containsKey(
+ serviceComponentName)
+ || !serviceComponentHosts.get(serviceName).get(serviceComponentName).containsKey(
+ hostname)) {
+ throw new ServiceComponentHostNotFoundException(getClusterName(),
+ serviceName, serviceComponentName, hostname);
}
+ return serviceComponentHosts.get(serviceName).get(serviceComponentName).get(
+ hostname);
}
@Override
@@ -681,21 +573,14 @@ public class ClusterImpl implements Cluster {
@Override
public void setClusterName(String clusterName) {
String oldName = null;
- clusterGlobalLock.writeLock().lock();
- try {
- ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- oldName = clusterEntity.getClusterName();
- clusterEntity.setClusterName(clusterName);
+ ClusterEntity clusterEntity = getClusterEntity();
+ oldName = clusterEntity.getClusterName();
+ clusterEntity.setClusterName(clusterName);
- // RollbackException possibility if UNIQUE constraint violated
- clusterEntity = clusterDAO.merge(clusterEntity);
- clusters.updateClusterName(oldName, clusterName);
- this.clusterName = clusterName;
- }
- } finally {
- clusterGlobalLock.writeLock().unlock();
- }
+ // RollbackException possibility if UNIQUE constraint violated
+ clusterEntity = clusterDAO.merge(clusterEntity);
+ clusters.updateClusterName(oldName, clusterName);
+ this.clusterName = clusterName;
// if the name changed, fire an event
if (!StringUtils.equals(oldName, clusterName)) {
@@ -707,32 +592,26 @@ public class ClusterImpl implements Cluster {
@Override
public Long getResourceId() {
ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- ResourceEntity resourceEntity = clusterEntity.getResource();
- if (resourceEntity == null) {
- LOG.warn("There is no resource associated with this cluster:\n\tCluster Name: {}\n\tCluster ID: {}",
+
+ ResourceEntity resourceEntity = clusterEntity.getResource();
+ if (resourceEntity == null) {
+ LOG.warn(
+ "There is no resource associated with this cluster:\n\tCluster Name: {}\n\tCluster ID: {}",
getClusterName(), getClusterId());
- return null;
- } else {
- return resourceEntity.getId();
- }
+ return null;
+ } else {
+ return resourceEntity.getId();
}
- return null;
}
@Override
public void addServiceComponentHosts(Collection<ServiceComponentHost> serviceComponentHosts) throws AmbariException {
- clusterGlobalLock.writeLock().lock();
- try {
- for (ServiceComponentHost serviceComponentHost : serviceComponentHosts) {
- Service service = getService(serviceComponentHost.getServiceName());
- ServiceComponent serviceComponent = service.getServiceComponent(serviceComponentHost.getServiceComponentName());
- serviceComponent.addServiceComponentHost(serviceComponentHost);
- }
- persistServiceComponentHosts(serviceComponentHosts);
- } finally {
- clusterGlobalLock.writeLock().unlock();
+ for (ServiceComponentHost serviceComponentHost : serviceComponentHosts) {
+ Service service = getService(serviceComponentHost.getServiceName());
+ ServiceComponent serviceComponent = service.getServiceComponent(serviceComponentHost.getServiceComponentName());
+ serviceComponent.addServiceComponentHost(serviceComponentHost);
}
+ persistServiceComponentHosts(serviceComponentHosts);
}
@Transactional
@@ -750,68 +629,61 @@ public class ClusterImpl implements Cluster {
svcCompHost.getHostName());
}
- loadServiceHostComponents();
-
final String hostname = svcCompHost.getHostName();
final String serviceName = svcCompHost.getServiceName();
final String componentName = svcCompHost.getServiceComponentName();
Set<Cluster> cs = clusters.getClustersForHost(hostname);
- clusterGlobalLock.writeLock().lock();
-
- try {
- boolean clusterFound = false;
- Iterator<Cluster> iter = cs.iterator();
- while (iter.hasNext()) {
- Cluster c = iter.next();
- if (c.getClusterId() == getClusterId()) {
- clusterFound = true;
- break;
- }
- }
-
- if (!clusterFound) {
- throw new AmbariException("Host does not belong this cluster"
- + ", hostname=" + hostname + ", clusterName=" + getClusterName()
- + ", clusterId=" + getClusterId());
+ boolean clusterFound = false;
+ Iterator<Cluster> iter = cs.iterator();
+ while (iter.hasNext()) {
+ Cluster c = iter.next();
+ if (c.getClusterId() == getClusterId()) {
+ clusterFound = true;
+ break;
}
+ }
- if (!serviceComponentHosts.containsKey(serviceName)) {
- serviceComponentHosts.put(serviceName,
- new HashMap<String, Map<String, ServiceComponentHost>>());
- }
+ if (!clusterFound) {
+ throw new AmbariException("Host does not belong this cluster"
+ + ", hostname=" + hostname + ", clusterName=" + getClusterName()
+ + ", clusterId=" + getClusterId());
+ }
- if (!serviceComponentHosts.get(serviceName).containsKey(componentName)) {
- serviceComponentHosts.get(serviceName).put(componentName,
- new HashMap<String, ServiceComponentHost>());
- }
+ if (!serviceComponentHosts.containsKey(serviceName)) {
+ serviceComponentHosts.put(serviceName,
+ new ConcurrentHashMap<String, ConcurrentMap<String, ServiceComponentHost>>());
+ }
- if (serviceComponentHosts.get(serviceName).get(componentName).containsKey(
- hostname)) {
- throw new AmbariException("Duplicate entry for ServiceComponentHost"
- + ", serviceName=" + serviceName + ", serviceComponentName"
- + componentName + ", hostname= " + hostname);
- }
+ if (!serviceComponentHosts.get(serviceName).containsKey(componentName)) {
+ serviceComponentHosts.get(serviceName).put(componentName,
+ new ConcurrentHashMap<String, ServiceComponentHost>());
+ }
- if (!serviceComponentHostsByHost.containsKey(hostname)) {
- serviceComponentHostsByHost.put(hostname,
- new ArrayList<ServiceComponentHost>());
- }
+ if (serviceComponentHosts.get(serviceName).get(componentName).containsKey(
+ hostname)) {
+ throw new AmbariException("Duplicate entry for ServiceComponentHost"
+ + ", serviceName=" + serviceName + ", serviceComponentName"
+ + componentName + ", hostname= " + hostname);
+ }
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding a new ServiceComponentHost" + ", clusterName="
- + getClusterName() + ", clusterId=" + getClusterId()
- + ", serviceName=" + serviceName + ", serviceComponentName"
- + componentName + ", hostname= " + hostname);
- }
+ if (!serviceComponentHostsByHost.containsKey(hostname)) {
+ serviceComponentHostsByHost.put(hostname,
+ new ArrayList<ServiceComponentHost>());
+ }
- serviceComponentHosts.get(serviceName).get(componentName).put(hostname,
- svcCompHost);
- serviceComponentHostsByHost.get(hostname).add(svcCompHost);
- } finally {
- clusterGlobalLock.writeLock().unlock();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Adding a new ServiceComponentHost" + ", clusterName="
+ + getClusterName() + ", clusterId=" + getClusterId()
+ + ", serviceName=" + serviceName + ", serviceComponentName"
+ + componentName + ", hostname= " + hostname);
}
+
+ serviceComponentHosts.get(serviceName).get(componentName).put(hostname,
+ svcCompHost);
+
+ serviceComponentHostsByHost.get(hostname).add(svcCompHost);
}
@Override
@@ -824,76 +696,69 @@ public class ClusterImpl implements Cluster {
svcCompHost.getHostName());
}
- loadServiceHostComponents();
-
final String hostname = svcCompHost.getHostName();
final String serviceName = svcCompHost.getServiceName();
final String componentName = svcCompHost.getServiceComponentName();
Set<Cluster> cs = clusters.getClustersForHost(hostname);
- clusterGlobalLock.writeLock().lock();
- try {
- boolean clusterFound = false;
- Iterator<Cluster> iter = cs.iterator();
- while (iter.hasNext()) {
- Cluster c = iter.next();
- if (c.getClusterId() == getClusterId()) {
- clusterFound = true;
- break;
- }
+ boolean clusterFound = false;
+ Iterator<Cluster> iter = cs.iterator();
+ while (iter.hasNext()) {
+ Cluster c = iter.next();
+ if (c.getClusterId() == getClusterId()) {
+ clusterFound = true;
+ break;
}
+ }
- if (!clusterFound) {
- throw new AmbariException("Host does not belong this cluster"
- + ", hostname=" + hostname + ", clusterName=" + getClusterName()
- + ", clusterId=" + getClusterId());
- }
+ if (!clusterFound) {
+ throw new AmbariException("Host does not belong this cluster"
+ + ", hostname=" + hostname + ", clusterName=" + getClusterName()
+ + ", clusterId=" + getClusterId());
+ }
- if (!serviceComponentHosts.containsKey(serviceName)
- || !serviceComponentHosts.get(serviceName).containsKey(componentName)
- || !serviceComponentHosts.get(serviceName).get(componentName).containsKey(
- hostname)) {
- throw new AmbariException("Invalid entry for ServiceComponentHost"
- + ", serviceName=" + serviceName + ", serviceComponentName"
- + componentName + ", hostname= " + hostname);
- }
+ if (!serviceComponentHosts.containsKey(serviceName)
+ || !serviceComponentHosts.get(serviceName).containsKey(componentName)
+ || !serviceComponentHosts.get(serviceName).get(componentName).containsKey(
+ hostname)) {
+ throw new AmbariException("Invalid entry for ServiceComponentHost"
+ + ", serviceName=" + serviceName + ", serviceComponentName"
+ + componentName + ", hostname= " + hostname);
+ }
- if (!serviceComponentHostsByHost.containsKey(hostname)) {
- throw new AmbariException("Invalid host entry for ServiceComponentHost"
- + ", serviceName=" + serviceName + ", serviceComponentName"
- + componentName + ", hostname= " + hostname);
- }
+ if (!serviceComponentHostsByHost.containsKey(hostname)) {
+ throw new AmbariException("Invalid host entry for ServiceComponentHost"
+ + ", serviceName=" + serviceName + ", serviceComponentName"
+ + componentName + ", hostname= " + hostname);
+ }
- ServiceComponentHost schToRemove = null;
- for (ServiceComponentHost sch : serviceComponentHostsByHost.get(hostname)) {
- if (sch.getServiceName().equals(serviceName)
- && sch.getServiceComponentName().equals(componentName)
- && sch.getHostName().equals(hostname)) {
- schToRemove = sch;
- break;
- }
+ ServiceComponentHost schToRemove = null;
+ for (ServiceComponentHost sch : serviceComponentHostsByHost.get(hostname)) {
+ if (sch.getServiceName().equals(serviceName)
+ && sch.getServiceComponentName().equals(componentName)
+ && sch.getHostName().equals(hostname)) {
+ schToRemove = sch;
+ break;
}
+ }
- if (schToRemove == null) {
- LOG.warn("Unavailable in per host cache. ServiceComponentHost"
- + ", serviceName=" + serviceName
- + ", serviceComponentName" + componentName
- + ", hostname= " + hostname);
- }
+ if (schToRemove == null) {
+ LOG.warn("Unavailable in per host cache. ServiceComponentHost"
+ + ", serviceName=" + serviceName
+ + ", serviceComponentName" + componentName
+ + ", hostname= " + hostname);
+ }
- if (LOG.isDebugEnabled()) {
- LOG.debug("Removing a ServiceComponentHost" + ", clusterName="
- + getClusterName() + ", clusterId=" + getClusterId()
- + ", serviceName=" + serviceName + ", serviceComponentName"
- + componentName + ", hostname= " + hostname);
- }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Removing a ServiceComponentHost" + ", clusterName="
+ + getClusterName() + ", clusterId=" + getClusterId()
+ + ", serviceName=" + serviceName + ", serviceComponentName"
+ + componentName + ", hostname= " + hostname);
+ }
- serviceComponentHosts.get(serviceName).get(componentName).remove(hostname);
- if (schToRemove != null) {
- serviceComponentHostsByHost.get(hostname).remove(schToRemove);
- }
- } finally {
- clusterGlobalLock.writeLock().unlock();
+ serviceComponentHosts.get(serviceName).get(componentName).remove(hostname);
+ if (schToRemove != null) {
+ serviceComponentHostsByHost.get(hostname).remove(schToRemove);
}
}
@@ -905,19 +770,13 @@ public class ClusterImpl implements Cluster {
}
@Override
- public List<ServiceComponentHost> getServiceComponentHosts(
- String hostname) {
- loadServiceHostComponents();
- clusterGlobalLock.readLock().lock();
- try {
- if (serviceComponentHostsByHost.containsKey(hostname)) {
- return new CopyOnWriteArrayList<ServiceComponentHost>(
- serviceComponentHostsByHost.get(hostname));
- }
- return new ArrayList<ServiceComponentHost>();
- } finally {
- clusterGlobalLock.readLock().unlock();
+ public List<ServiceComponentHost> getServiceComponentHosts(String hostname) {
+ List<ServiceComponentHost> serviceComponentHosts = serviceComponentHostsByHost.get(hostname);
+ if (null != serviceComponentHosts) {
+ return new CopyOnWriteArrayList<ServiceComponentHost>(serviceComponentHosts);
}
+
+ return new ArrayList<ServiceComponentHost>();
}
@Override
@@ -961,21 +820,16 @@ public class ClusterImpl implements Cluster {
public List<ServiceComponentHost> getServiceComponentHosts(String serviceName, String componentName) {
ArrayList<ServiceComponentHost> foundItems = new ArrayList<ServiceComponentHost>();
- loadServiceHostComponents();
- clusterGlobalLock.readLock().lock();
- try {
- Map<String, Map<String, ServiceComponentHost>> foundByService = serviceComponentHosts.get(serviceName);
- if (foundByService != null) {
- if (componentName == null) {
- for(Map<String, ServiceComponentHost> foundByComponent :foundByService.values()) {
- foundItems.addAll(foundByComponent.values());
- }
- } else if (foundByService.containsKey(componentName)) {
- foundItems.addAll(foundByService.get(componentName).values());
+ ConcurrentMap<String, ConcurrentMap<String, ServiceComponentHost>> foundByService = serviceComponentHosts.get(
+ serviceName);
+ if (foundByService != null) {
+ if (componentName == null) {
+ for (Map<String, ServiceComponentHost> foundByComponent : foundByService.values()) {
+ foundItems.addAll(foundByComponent.values());
}
+ } else if (foundByService.containsKey(componentName)) {
+ foundItems.addAll(foundByService.get(componentName).values());
}
- } finally {
- clusterGlobalLock.readLock().unlock();
}
return foundItems;
@@ -983,76 +837,44 @@ public class ClusterImpl implements Cluster {
@Override
public void addService(Service service) {
- loadServices();
- clusterGlobalLock.writeLock().lock();
- try {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding a new Service" + ", clusterName=" + getClusterName()
- + ", clusterId=" + getClusterId() + ", serviceName="
- + service.getName());
- }
- services.put(service.getName(), service);
- } finally {
- clusterGlobalLock.writeLock().unlock();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Adding a new Service" + ", clusterName=" + getClusterName() + ", clusterId="
+ + getClusterId() + ", serviceName=" + service.getName());
}
+ services.put(service.getName(), service);
}
@Override
public Service addService(String serviceName) throws AmbariException {
- loadServices();
- clusterGlobalLock.writeLock().lock();
- try {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding a new Service" + ", clusterName=" + getClusterName()
+ if (services.containsKey(serviceName)) {
+ throw new AmbariException("Service already exists" + ", clusterName=" + getClusterName()
+ ", clusterId=" + getClusterId() + ", serviceName=" + serviceName);
- }
- if (services.containsKey(serviceName)) {
- throw new AmbariException("Service already exists" + ", clusterName="
- + getClusterName() + ", clusterId=" + getClusterId()
- + ", serviceName=" + serviceName);
- }
- Service s = serviceFactory.createNew(this, serviceName);
- services.put(s.getName(), s);
- return s;
- } finally {
- clusterGlobalLock.writeLock().unlock();
}
+
+ Service service = serviceFactory.createNew(this, serviceName);
+ addService(service);
+
+ return service;
}
@Override
public Service getService(String serviceName) throws AmbariException {
- loadServices();
- clusterGlobalLock.readLock().lock();
- try {
- if (!services.containsKey(serviceName)) {
- throw new ServiceNotFoundException(getClusterName(), serviceName);
- }
- return services.get(serviceName);
- } finally {
- clusterGlobalLock.readLock().unlock();
+ Service service = services.get(serviceName);
+ if (null == service) {
+ throw new ServiceNotFoundException(getClusterName(), serviceName);
}
+
+ return service;
}
@Override
public Map<String, Service> getServices() {
- loadServices();
- clusterGlobalLock.readLock().lock();
- try {
- return new HashMap<String, Service>(services);
- } finally {
- clusterGlobalLock.readLock().unlock();
- }
+ return new HashMap<String, Service>(services);
}
@Override
public StackId getDesiredStackVersion() {
- loadStackVersion();
- clusterGlobalLock.readLock().lock();
- try {
- return desiredStackVersion;
- } finally {
- clusterGlobalLock.readLock().unlock();
- }
+ return desiredStackVersion;
}
@Override
@@ -1075,25 +897,24 @@ public class ClusterImpl implements Cluster {
StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- clusterEntity.setDesiredStack(stackEntity);
- clusterEntity = clusterDAO.merge(clusterEntity);
- if (cascade) {
- for (Service service : getServices().values()) {
- service.setDesiredStackVersion(stackId);
+ clusterEntity.setDesiredStack(stackEntity);
+ clusterEntity = clusterDAO.merge(clusterEntity);
+
+ if (cascade) {
+ for (Service service : getServices().values()) {
+ service.setDesiredStackVersion(stackId);
- for (ServiceComponent sc : service.getServiceComponents().values()) {
- sc.setDesiredStackVersion(stackId);
+ for (ServiceComponent sc : service.getServiceComponents().values()) {
+ sc.setDesiredStackVersion(stackId);
- for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
- sch.setDesiredStackVersion(stackId);
- }
+ for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
+ sch.setDesiredStackVersion(stackId);
}
}
}
- loadServiceConfigTypes();
}
+ loadServiceConfigTypes();
} finally {
clusterGlobalLock.writeLock().unlock();
}
@@ -1101,89 +922,55 @@ public class ClusterImpl implements Cluster {
@Override
public StackId getCurrentStackVersion() {
- clusterGlobalLock.readLock().lock();
- try {
- ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- ClusterStateEntity clusterStateEntity = clusterEntity.getClusterStateEntity();
- if (clusterStateEntity != null) {
- StackEntity currentStackEntity = clusterStateEntity.getCurrentStack();
- return new StackId(currentStackEntity);
- }
- }
+ ClusterEntity clusterEntity = getClusterEntity();
- return null;
- } finally {
- clusterGlobalLock.readLock().unlock();
+ ClusterStateEntity clusterStateEntity = clusterEntity.getClusterStateEntity();
+ if (clusterStateEntity != null) {
+ StackEntity currentStackEntity = clusterStateEntity.getCurrentStack();
+ return new StackId(currentStackEntity);
}
+
+ return null;
}
@Override
public State getProvisioningState() {
- clusterGlobalLock.readLock().lock();
State provisioningState = null;
- try {
- ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- provisioningState = clusterEntity.getProvisioningState();
-
- if (null == provisioningState) {
- provisioningState = State.INIT;
- }
- }
+ ClusterEntity clusterEntity = getClusterEntity();
+ provisioningState = clusterEntity.getProvisioningState();
- return provisioningState;
- } finally {
- clusterGlobalLock.readLock().unlock();
+ if (null == provisioningState) {
+ provisioningState = State.INIT;
}
+
+ return provisioningState;
}
@Override
public void setProvisioningState(State provisioningState) {
- clusterGlobalLock.writeLock().lock();
- try {
- ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- clusterEntity.setProvisioningState(provisioningState);
- clusterEntity = clusterDAO.merge(clusterEntity);
- }
- } finally {
- clusterGlobalLock.writeLock().unlock();
- }
+ ClusterEntity clusterEntity = getClusterEntity();
+ clusterEntity.setProvisioningState(provisioningState);
+ clusterEntity = clusterDAO.merge(clusterEntity);
}
@Override
public SecurityType getSecurityType() {
- clusterGlobalLock.readLock().lock();
SecurityType securityType = null;
- try {
- ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- securityType = clusterEntity.getSecurityType();
-
- if (null == securityType) {
- securityType = SecurityType.NONE;
- }
- }
+ ClusterEntity clusterEntity = getClusterEntity();
+ securityType = clusterEntity.getSecurityType();
- return securityType;
- } finally {
- clusterGlobalLock.readLock().unlock();
+ if (null == securityType) {
+ securityType = SecurityType.NONE;
}
+
+ return securityType;
}
@Override
public void setSecurityType(SecurityType securityType) {
- clusterGlobalLock.writeLock().lock();
- try {
- ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- clusterEntity.setSecurityType(securityType);
- clusterEntity = clusterDAO.merge(clusterEntity);
- }
- } finally {
- clusterGlobalLock.writeLock().unlock();
- }
+ ClusterEntity clusterEntity = getClusterEntity();
+ clusterEntity.setSecurityType(securityType);
+ clusterEntity = clusterDAO.merge(clusterEntity);
}
/**
@@ -1859,129 +1646,127 @@ public class ClusterImpl implements Cluster {
clusterGlobalLock.writeLock().lock();
try {
ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- ClusterVersionEntity existingClusterVersion = clusterVersionDAO.findByClusterAndStackAndVersion(
+ ClusterVersionEntity existingClusterVersion = clusterVersionDAO.findByClusterAndStackAndVersion(
getClusterName(), stackId, version);
- if (existingClusterVersion == null) {
- throw new AmbariException(
- "Existing cluster version not found for cluster="
- + getClusterName() + ", stack=" + stackId + ", version="
- + version);
- }
+ if (existingClusterVersion == null) {
+ throw new AmbariException("Existing cluster version not found for cluster="
+ + getClusterName() + ", stack=" + stackId + ", version=" + version);
+ }
- // NOOP
- if (existingClusterVersion.getState() == state) {
- return;
- }
+ // NOOP
+ if (existingClusterVersion.getState() == state) {
+ return;
+ }
- switch (existingClusterVersion.getState()) {
- case CURRENT:
- // If CURRENT state is changed here cluster will not have CURRENT
- // state.
- // CURRENT state will be changed to INSTALLED when another CURRENT
- // state is added.
- // allowedStates.add(RepositoryVersionState.INSTALLED);
- break;
- case INSTALLING:
- allowedStates.add(RepositoryVersionState.INSTALLED);
- allowedStates.add(RepositoryVersionState.INSTALL_FAILED);
- allowedStates.add(RepositoryVersionState.OUT_OF_SYNC);
- if (clusterVersionDAO.findByClusterAndStateCurrent(getClusterName()) == null) {
- allowedStates.add(RepositoryVersionState.CURRENT);
- }
- break;
- case INSTALL_FAILED:
- allowedStates.add(RepositoryVersionState.INSTALLING);
- break;
- case INSTALLED:
- allowedStates.add(RepositoryVersionState.INSTALLING);
- allowedStates.add(RepositoryVersionState.OUT_OF_SYNC);
- allowedStates.add(RepositoryVersionState.CURRENT);
- break;
- case OUT_OF_SYNC:
- allowedStates.add(RepositoryVersionState.INSTALLING);
- break;
- case INIT:
+ switch (existingClusterVersion.getState()) {
+ case CURRENT:
+ // If CURRENT state is changed here cluster will not have CURRENT
+ // state.
+ // CURRENT state will be changed to INSTALLED when another CURRENT
+ // state is added.
+ // allowedStates.add(RepositoryVersionState.INSTALLED);
+ break;
+ case INSTALLING:
+ allowedStates.add(RepositoryVersionState.INSTALLED);
+ allowedStates.add(RepositoryVersionState.INSTALL_FAILED);
+ allowedStates.add(RepositoryVersionState.OUT_OF_SYNC);
+ if (clusterVersionDAO.findByClusterAndStateCurrent(getClusterName()) == null) {
allowedStates.add(RepositoryVersionState.CURRENT);
- break;
- }
+ }
+ break;
+ case INSTALL_FAILED:
+ allowedStates.add(RepositoryVersionState.INSTALLING);
+ break;
+ case INSTALLED:
+ allowedStates.add(RepositoryVersionState.INSTALLING);
+ allowedStates.add(RepositoryVersionState.OUT_OF_SYNC);
+ allowedStates.add(RepositoryVersionState.CURRENT);
+ break;
+ case OUT_OF_SYNC:
+ allowedStates.add(RepositoryVersionState.INSTALLING);
+ break;
+ case INIT:
+ allowedStates.add(RepositoryVersionState.CURRENT);
+ break;
+ }
- if (!allowedStates.contains(state)) {
- throw new AmbariException("Invalid cluster version transition from "
+ if (!allowedStates.contains(state)) {
+ throw new AmbariException("Invalid cluster version transition from "
+ existingClusterVersion.getState() + " to " + state);
- }
+ }
- // There must be at most one cluster version whose state is CURRENT at
- // all times.
- if (state == RepositoryVersionState.CURRENT) {
- ClusterVersionEntity currentVersion = clusterVersionDAO.findByClusterAndStateCurrent(getClusterName());
- if (currentVersion != null) {
- currentVersion.setState(RepositoryVersionState.INSTALLED);
- currentVersion = clusterVersionDAO.merge(currentVersion);
- }
+ // There must be at most one cluster version whose state is CURRENT at
+ // all times.
+ if (state == RepositoryVersionState.CURRENT) {
+ ClusterVersionEntity currentVersion = clusterVersionDAO.findByClusterAndStateCurrent(
+ getClusterName());
+ if (currentVersion != null) {
+ currentVersion.setState(RepositoryVersionState.INSTALLED);
+ currentVersion = clusterVersionDAO.merge(currentVersion);
}
+ }
- existingClusterVersion.setState(state);
- existingClusterVersion.setEndTime(System.currentTimeMillis());
- existingClusterVersion = clusterVersionDAO.merge(existingClusterVersion);
+ existingClusterVersion.setState(state);
+ existingClusterVersion.setEndTime(System.currentTimeMillis());
+ existingClusterVersion = clusterVersionDAO.merge(existingClusterVersion);
- if (state == RepositoryVersionState.CURRENT) {
- for (HostEntity hostEntity : clusterEntity.getHostEntities()) {
- if (hostHasReportables(existingClusterVersion.getRepositoryVersion(), hostEntity)) {
- continue;
- }
+ if (state == RepositoryVersionState.CURRENT) {
+ for (HostEntity hostEntity : clusterEntity.getHostEntities()) {
+ if (hostHasReportables(existingClusterVersion.getRepositoryVersion(), hostEntity)) {
+ continue;
+ }
- Collection<HostVersionEntity> versions = hostVersionDAO.findByHost(hostEntity.getHostName());
+ Collection<HostVersionEntity> versions = hostVersionDAO.findByHost(
+ hostEntity.getHostName());
- HostVersionEntity target = null;
- if (null != versions) {
- // Set anything that was previously marked CURRENT as INSTALLED, and
- // the matching version as CURRENT
- for (HostVersionEntity entity : versions) {
- if (entity.getRepositoryVersion().getId().equals(
+ HostVersionEntity target = null;
+ if (null != versions) {
+ // Set anything that was previously marked CURRENT as INSTALLED, and
+ // the matching version as CURRENT
+ for (HostVersionEntity entity : versions) {
+ if (entity.getRepositoryVersion().getId().equals(
existingClusterVersion.getRepositoryVersion().getId())) {
- target = entity;
- target.setState(state);
- target = hostVersionDAO.merge(target);
- } else if (entity.getState() == RepositoryVersionState.CURRENT) {
- entity.setState(RepositoryVersionState.INSTALLED);
- entity = hostVersionDAO.merge(entity);
- }
+ target = entity;
+ target.setState(state);
+ target = hostVersionDAO.merge(target);
+ } else if (entity.getState() == RepositoryVersionState.CURRENT) {
+ entity.setState(RepositoryVersionState.INSTALLED);
+ entity = hostVersionDAO.merge(entity);
}
}
+ }
- if (null == target) {
- // If no matching version was found, create one with the desired
- // state
- HostVersionEntity hve = new HostVersionEntity(hostEntity,
+ if (null == target) {
+ // If no matching version was found, create one with the desired
+ // state
+ HostVersionEntity hve = new HostVersionEntity(hostEntity,
existingClusterVersion.getRepositoryVersion(), state);
- LOG.info("Creating host version for {}, state={}, repo={} (repo_id={})",
- hve.getHostName(), hve.getState(),
- hve.getRepositoryVersion().getVersion(), hve.getRepositoryVersion().getId());
+ LOG.info("Creating host version for {}, state={}, repo={} (repo_id={})",
+ hve.getHostName(), hve.getState(), hve.getRepositoryVersion().getVersion(),
+ hve.getRepositoryVersion().getId());
- hostVersionDAO.create(hve);
- }
+ hostVersionDAO.create(hve);
}
+ }
- // when setting the cluster's state to current, we must also
- // bring the desired stack and current stack in line with each other
- StackEntity desiredStackEntity = clusterEntity.getDesiredStack();
- StackId desiredStackId = new StackId(desiredStackEntity);
+ // when setting the cluster's state to current, we must also
+ // bring the desired stack and current stack in line with each other
+ StackEntity desiredStackEntity = clusterEntity.getDesiredStack();
+ StackId desiredStackId = new StackId(desiredStackEntity);
- // if the desired stack ID doesn't match the target when setting the
- // cluster to CURRENT, then there's a problem
- if (!desiredStackId.equals(stackId)) {
- String message = MessageFormat.format(
+ // if the desired stack ID doesn't match the target when setting the
+ // cluster to CURRENT, then there's a problem
+ if (!desiredStackId.equals(stackId)) {
+ String message = MessageFormat.format(
"The desired stack ID {0} must match {1} when transitioning the cluster''s state to {2}",
desiredStackId, stackId, RepositoryVersionState.CURRENT);
- throw new AmbariException(message);
- }
-
- setCurrentStackVersion(stackId);
+ throw new AmbariException(message);
}
+
+ setCurrentStackVersion(stackId);
}
} catch (RollbackException e) {
String message = MessageFormat.format(
@@ -2030,22 +1815,21 @@ public class ClusterImpl implements Cluster {
stackId.getStackVersion());
ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- ClusterStateEntity clusterStateEntity = clusterStateDAO.findByPK(clusterEntity.getClusterId());
- if (clusterStateEntity == null) {
- clusterStateEntity = new ClusterStateEntity();
- clusterStateEntity.setClusterId(clusterEntity.getClusterId());
- clusterStateEntity.setCurrentStack(stackEntity);
- clusterStateEntity.setClusterEntity(clusterEntity);
- clusterStateDAO.create(clusterStateEntity);
- clusterStateEntity = clusterStateDAO.merge(clusterStateEntity);
- clusterEntity.setClusterStateEntity(clusterStateEntity);
- clusterEntity = clusterDAO.merge(clusterEntity);
- } else {
- clusterStateEntity.setCurrentStack(stackEntity);
- clusterStateEntity = clusterStateDAO.merge(clusterStateEntity);
- clusterEntity = clusterDAO.merge(clusterEntity);
- }
+ ClusterStateEntity clusterStateEntity = clusterStateDAO.findByPK(
+ clusterEntity.getClusterId());
+ if (clusterStateEntity == null) {
+ clusterStateEntity = new ClusterStateEntity();
+ clusterStateEntity.setClusterId(clusterEntity.getClusterId());
+ clusterStateEntity.setCurrentStack(stackEntity);
+ clusterStateEntity.setClusterEntity(clusterEntity);
+ clusterStateDAO.create(clusterStateEntity);
+ clusterStateEntity = clusterStateDAO.merge(clusterStateEntity);
+ clusterEntity.setClusterStateEntity(clusterStateEntity);
+ clusterEntity = clusterDAO.merge(clusterEntity);
+ } else {
+ clusterStateEntity.setCurrentStack(stackEntity);
+ clusterStateEntity = clusterStateDAO.merge(clusterStateEntity);
+ clusterEntity = clusterDAO.merge(clusterEntity);
}
} catch (RollbackException e) {
LOG.warn("Unable to set version " + stackId + " for cluster "
@@ -2059,7 +1843,6 @@ public class ClusterImpl implements Cluster {
@Override
public Map<String, Config> getConfigsByType(String configType) {
- loadConfigurations();
clusterGlobalLock.readLock().lock();
try {
if (!allConfigs.containsKey(configType)) {
@@ -2074,7 +1857,6 @@ public class ClusterImpl implements Cluster {
@Override
public Config getConfig(String configType, String versionTag) {
- loadConfigurations();
clusterGlobalLock.readLock().lock();
try {
if (!allConfigs.containsKey(configType)
@@ -2094,11 +1876,13 @@ public class ClusterImpl implements Cluster {
if (!allConfigs.containsKey(configType)) {
return null;
}
- for(Map.Entry<String, Config> entry: allConfigs.get(configType).entrySet()) {
- if(entry.getValue().getVersion().equals(configVersion)) {
+
+ for (Map.Entry<String, Config> entry : allConfigs.get(configType).entrySet()) {
+ if (entry.getValue().getVersion().equals(configVersion)) {
return entry.getValue();
}
}
+
return null;
} finally {
clusterGlobalLock.readLock().unlock();
@@ -2107,14 +1891,14 @@ public class ClusterImpl implements Cluster {
@Override
public void addConfig(Config config) {
- loadConfigurations();
+ if (config.getType() == null || config.getType().isEmpty()) {
+ throw new IllegalArgumentException("Config type cannot be empty");
+ }
+
clusterGlobalLock.writeLock().lock();
try {
- if (config.getType() == null || config.getType().isEmpty()) {
- throw new IllegalArgumentException("Config type cannot be empty");
- }
if (!allConfigs.containsKey(config.getType())) {
- allConfigs.put(config.getType(), new HashMap<String, Config>());
+ allConfigs.put(config.getType(), new ConcurrentHashMap<String, Config>());
}
allConfigs.get(config.getType()).put(config.getTag(), config);
@@ -2125,11 +1909,10 @@ public class ClusterImpl implements Cluster {
@Override
public Collection<Config> getAllConfigs() {
- loadConfigurations();
clusterGlobalLock.readLock().lock();
try {
List<Config> list = new ArrayList<Config>();
- for (Entry<String, Map<String, Config>> entry : allConfigs.entrySet()) {
+ for (Entry<String, ConcurrentMap<String, Config>> entry : allConfigs.entrySet()) {
for (Config config : entry.getValue().values()) {
list.add(config);
}
@@ -2143,44 +1926,31 @@ public class ClusterImpl implements Cluster {
@Override
public ClusterResponse convertToResponse()
throws AmbariException {
- loadStackVersion();
String clusterName = getClusterName();
Map<String, Host> hosts = clusters.getHostsForCluster(clusterName);
- clusterGlobalLock.readLock().lock();
- try {
- return new ClusterResponse(getClusterId(), clusterName,
- getProvisioningState(), getSecurityType(), hosts.keySet(),
- hosts.size(), getDesiredStackVersion().getStackId(),
- getClusterHealthReport(hosts));
- } finally {
- clusterGlobalLock.readLock().unlock();
- }
+
+ return new ClusterResponse(getClusterId(), clusterName,
+ getProvisioningState(), getSecurityType(), hosts.keySet(),
+ hosts.size(), getDesiredStackVersion().getStackId(),
+ getClusterHealthReport(hosts));
}
@Override
public void debugDump(StringBuilder sb) {
- loadServices();
- loadStackVersion();
- clusterGlobalLock.readLock().lock();
- try {
- sb.append("Cluster={ clusterName=").append(getClusterName()).append(
- ", clusterId=").append(getClusterId()).append(
- ", desiredStackVersion=").append(desiredStackVersion.getStackId()).append(
- ", services=[ ");
- boolean first = true;
- for (Service s : services.values()) {
- if (!first) {
- sb.append(" , ");
- }
- first = false;
- sb.append("\n ");
- s.debugDump(sb);
- sb.append(' ');
+ sb.append("Cluster={ clusterName=").append(getClusterName()).append(", clusterId=").append(
+ getClusterId()).append(", desiredStackVersion=").append(
+ desiredStackVersion.getStackId()).append(", services=[ ");
+ boolean first = true;
+ for (Service s : services.values()) {
+ if (!first) {
+ sb.append(" , ");
}
- sb.append(" ] }");
- } finally {
- clusterGlobalLock.readLock().unlock();
+ first = false;
+ sb.append("\n ");
+ s.debugDump(sb);
+ sb.append(' ');
}
+ sb.append(" ] }");
}
@Override
@@ -2198,7 +1968,6 @@ public class ClusterImpl implements Cluster {
@Override
@Transactional
public void deleteAllServices() throws AmbariException {
- loadServices();
clusterGlobalLock.writeLock().lock();
try {
LOG.info("Deleting all services for cluster" + ", clusterName="
@@ -2224,7 +1993,6 @@ public class ClusterImpl implements Cluster {
@Override
public void deleteService(String serviceName)
throws AmbariException {
- loadServices();
clusterGlobalLock.writeLock().lock();
try {
Service service = getService(serviceName);
@@ -2273,7 +2041,6 @@ public class ClusterImpl implements Cluster {
@Override
public boolean canBeRemoved() {
- loadServices();
clusterGlobalLock.readLock().lock();
try {
boolean safeToRemove = true;
@@ -2388,7 +2155,6 @@ public class ClusterImpl implements Cluster {
* @return a map of type-to-configuration information.
*/
private Map<String, Set<DesiredConfig>> getDesiredConfigs(boolean allVersions) {
- loadConfigurations();
clusterGlobalLock.readLock().lock();
try {
Map<String, Set<DesiredConfig>> map = new HashMap<>();
@@ -2474,37 +2240,38 @@ public class ClusterImpl implements Cluster {
clusterGlobalLock.writeLock().lock();
try {
ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- // set config group
- if (configGroup != null) {
- serviceConfigEntity.setGroupId(configGroup.getId());
- Collection<Config> configs = configGroup.getConfigurations().values();
- List<ClusterConfigEntity> configEntities = new ArrayList<ClusterConfigEntity>(configs.size());
- for (Config config : configs) {
- configEntities.add(clusterDAO.findConfig(getClusterId(), config.getType(), config.getTag()));
- }
-
- serviceConfigEntity.setClusterConfigEntities(configEntities);
- } else {
- List<ClusterConfigEntity> configEntities = getClusterConfigEntitiesByService(serviceName);
- serviceConfigEntity.setClusterConfigEntities(configEntities);
+ // set config group
+ if (configGroup != null) {
+ serviceConfigEntity.setGroupId(configGroup.getId());
+ Collection<Config> configs = configGroup.getConfigurations().values();
+ List<ClusterConfigEntity> configEntities = new ArrayList<ClusterConfigEntity>(
+ configs.size());
+ for (Config config : configs) {
+ configEntities.add(
+ clusterDAO.findConfig(getClusterId(), config.getType(), config.getTag()));
}
+ serviceConfigEntity.setClusterConfigEntities(configEntities);
+ } else {
+ List<ClusterConfigEntity> configEntities = getClusterConfigEntitiesByService(serviceName);
+ serviceConfigEntity.setClusterConfigEntities(configEntities);
+ }
- long nextServiceConfigVersion = serviceConfigDAO.findNextServiceConfigVersion(clusterId, serviceName);
- serviceConfigEntity.setServiceName(serviceName);
- serviceConfigEntity.setClusterEntity(clusterEntity);
- serviceConfigEntity.setVersion(nextServiceConfigVersion);
- serviceConfigEntity.setUser(user);
- serviceConfigEntity.setNote(note);
- serviceConfigEntity.setStack(clusterEntity.getDesiredStack());
+ long nextServiceConfigVersion = serviceConfigDAO.findNextServiceConfigVersion(clusterId,
+ serviceName);
- serviceConfigDAO.create(serviceConfigEntity);
- if (configGroup != null) {
- serviceConfigEntity.setHostIds(new ArrayList<Long>(configGroup.getHosts().keySet()));
- serviceConfigEntity = serviceConfigDAO.merge(serviceConfigEntity);
- }
+ serviceConfigEntity.setServiceName(serviceName);
+ serviceConfigEntity.setClusterEntity(clusterEntity);
+ serviceConfigEntity.setVersion(nextServiceConfigVersion);
+ serviceConfigEntity.setUser(user);
+ serviceConfigEntity.setNote(note);
+ serviceConfigEntity.setStack(clusterEntity.getDesiredStack());
+
+ serviceConfigDAO.create(serviceConfigEntity);
+ if (configGroup != null) {
+ serviceConfigEntity.setHostIds(new ArrayList<Long>(configGroup.getHosts().keySet()));
+ serviceConfigEntity = serviceConfigDAO.merge(serviceConfigEntity);
}
} finally {
clusterGlobalLock.writeLock().unlock();
@@ -2597,7 +2364,6 @@ public class ClusterImpl implements Cluster {
@Override
public List<ServiceConfigVersionResponse> getServiceConfigVersions() {
- loadConfigurations();
clusterGlobalLock.readLock().lock();
try {
List<ServiceConfigVersionResponse> serviceConfigVersionResponses = new ArrayList<ServiceConfigVersionResponse>();
@@ -2926,35 +2692,26 @@ public class ClusterImpl implements Cluster {
@Override
public Config getDesiredConfigByType(String configType) {
- loadConfigurations();
- clusterGlobalLock.readLock().lock();
- try {
- for (ClusterConfigMappingEntity e : clusterDAO.getClusterConfigMappingEntitiesByCluster(getClusterId())) {
- if (e.isSelected() > 0 && e.getType().equals(configType)) {
- return getConfig(e.getType(), e.getTag());
- }
+ for (ClusterConfigMappingEntity e : clusterDAO.getClusterConfigMappingEntitiesByCluster(
+ getClusterId())) {
+ if (e.isSelected() > 0 && e.getType().equals(configType)) {
+ return getConfig(e.getType(), e.getTag());
}
-
- return null;
- } finally {
- clusterGlobalLock.readLock().unlock();
}
+
+ return null;
}
@Override
public boolean isConfigTypeExists(String configType) {
- clusterGlobalLock.readLock().lock();
- try {
- for (ClusterConfigMappingEntity e : clusterDAO.getClusterConfigMappingEntitiesByCluster(getClusterId())) {
- if (e.getType().equals(configType)) {
- return true;
- }
+ for (ClusterConfigMappingEntity e : clusterDAO.getClusterConfigMappingEntitiesByCluster(
+ getClusterId())) {
+ if (e.getType().equals(configType)) {
+ return true;
}
-
- return false;
- } finally {
- clusterGlobalLock.readLock().unlock();
}
+
+ return false;
}
@Override
@@ -3125,13 +2882,13 @@ public class ClusterImpl implements Cluster {
*/
@Override
public Set<String> getHosts(String serviceName, String componentName) {
- Map<String, Service> services = getServices();
+ Map<String, Service> clusterServices = getServices();
- if (!services.containsKey(serviceName)) {
+ if (!clusterServices.containsKey(serviceName)) {
return Collections.emptySet();
}
- Service service = services.get(serviceName);
+ Service service = clusterServices.get(serviceName);
Map<String, ServiceComponent> components = service.getServiceComponents();
if (!components.containsKey(componentName) ||
@@ -3250,16 +3007,14 @@ public class ClusterImpl implements Cluster {
@Override
public boolean checkPermission(PrivilegeEntity privilegeEntity, boolean readOnly) {
ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- ResourceEntity resourceEntity = clusterEntity.getResource();
- if (resourceEntity != null) {
- Integer permissionId = privilegeEntity.getPermission().getId();
- // CLUSTER.USER or CLUSTER.ADMINISTRATOR for the given cluster resource.
- if (privilegeEntity.getResource().equals(resourceEntity)) {
- if ((readOnly && permissionId.equals(PermissionEntity.CLUSTER_USER_PERMISSION)) ||
- permissionId.equals(PermissionEntity.CLUSTER_ADMINISTRATOR_PERMISSION)) {
- return true;
- }
+ ResourceEntity resourceEntity = clusterEntity.getResource();
+ if (resourceEntity != null) {
+ Integer permissionId = privilegeEntity.getPermission().getId();
+ // CLUSTER.USER or CLUSTER.ADMINISTRATOR for the given cluster resource.
+ if (privilegeEntity.getResource().equals(resourceEntity)) {
+ if ((readOnly && permissionId.equals(PermissionEntity.CLUSTER_USER_PERMISSION))
+ || permissionId.equals(PermissionEntity.CLUSTER_ADMINISTRATOR_PERMISSION)) {
+ return true;
}
}
}
@@ -3525,19 +3280,16 @@ public class ClusterImpl implements Cluster {
* Caches all of the {@link ClusterConfigEntity}s in {@link #allConfigs}.
*/
private void cacheConfigurations() {
- ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- if (null == allConfigs) {
- allConfigs = new HashMap<String, Map<String, Config>>();
- }
-
+ clusterGlobalLock.writeLock().lock();
+ try {
+ ClusterEntity clusterEntity = getClusterEntity();
allConfigs.clear();
if (!clusterEntity.getClusterConfigEntities().isEmpty()) {
for (ClusterConfigEntity entity : clusterEntity.getClusterConfigEntities()) {
if (!allConfigs.containsKey(entity.getType())) {
- allConfigs.put(entity.getType(), new HashMap<String, Config>());
+ allConfigs.put(entity.getType(), new ConcurrentHashMap<String, Config>());
}
Config config = configFactory.createExisting(this, entity);
@@ -3545,80 +3297,23 @@ public class ClusterImpl implements Cluster {
allConfigs.get(entity.getType()).put(entity.getTag(), config);
}
}
- }
- }
-
- private void loadConfigurations() {
- if (allConfigs != null) {
- return;
- }
- clusterGlobalLock.writeLock().lock();
- try {
- if (allConfigs != null) {
- return;
- }
- cacheConfigurations();
-
} finally {
clusterGlobalLock.writeLock().unlock();
}
}
private void loadStackVersion() {
- if (desiredStackVersionSet) {
- return;
- }
- clusterGlobalLock.writeLock().lock();
- try {
-
- if (desiredStackVersionSet) {
- return;
- }
+ desiredStackVersion = new StackId(getClusterEntity().getDesiredStack());
- desiredStackVersion = new StackId(getClusterEntity().getDesiredStack());
-
- if (!StringUtils.isEmpty(desiredStackVersion.getStackName()) && !
- StringUtils.isEmpty(desiredStackVersion.getStackVersion())) {
- try {
- loadServiceConfigTypes();
- } catch (AmbariException e) {
- //TODO recheck wrapping exception here, required for lazy loading after invalidation
- throw new RuntimeException(e);
- }
+ if (!StringUtils.isEmpty(desiredStackVersion.getStackName())
+ && !StringUtils.isEmpty(desiredStackVersion.getStackVersion())) {
+ try {
+ loadServiceConfigTypes();
+ } catch (AmbariException e) {
+ // TODO recheck wrapping exception here, required for lazy loading after
+ // invalidation
+ throw new RuntimeException(e);
}
-
- desiredStackVersionSet = true;
-
- } finally {
- clusterGlobalLock.writeLock().unlock();
- }
-
- }
-
- /**
- * Purpose of this method is to clear all cached data to re-read it from database.
- * To be used in case of desync.
- */
- @Override
- public void invalidateData() {
- clusterGlobalLock.writeLock().lock();
- try {
- allConfigs = null;
- services = null;
- desiredStackVersionSet = false;
-
- serviceComponentHosts.clear();
- serviceComponentHostsByHost.clear();
- svcHostsLoaded = false;
-
- clusterConfigGroups = null;
-
- //TODO investigate reset request executions, it has separate api which is not too heavy
-
- refresh();
-
- } finally {
- clusterGlobalLock.writeLock().unlock();
}
}
@@ -3668,17 +3363,8 @@ public class ClusterImpl implements Cluster {
*/
@Override
public UpgradeEntity getUpgradeEntity() {
- clusterGlobalLock.readLock().lock();
- try {
- ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- return clusterEntity.getUpgradeEntity();
- } else {
- return null;
- }
- } finally {
- clusterGlobalLock.readLock().unlock();
- }
+ ClusterEntity clusterEntity = getClusterEntity();
+ return clusterEntity.getUpgradeEntity();
}
/**
@@ -3687,20 +3373,15 @@ public class ClusterImpl implements Cluster {
@Override
@Transactional
public void setUpgradeEntity(UpgradeEntity upgradeEntity) throws AmbariException {
- clusterGlobalLock.writeLock().lock();
try {
ClusterEntity clusterEntity = getClusterEntity();
- if (clusterEntity != null) {
- clusterEntity.setUpgradeEntity(upgradeEntity);
- clusterDAO.merge(clusterEntity);
- }
+ clusterEntity.setUpgradeEntity(upgradeEntity);
+ clusterDAO.merge(clusterEntity);
} catch (RollbackException e) {
String msg = "Unable to set upgrade entiry " + upgradeEntity + " for cluster "
+ getClusterName();
LOG.warn(msg);
throw new AmbariException(msg, e);
- } finally {
- clusterGlobalLock.writeLock().unlock();
}
}
@@ -3772,4 +3453,5 @@ public class ClusterImpl implements Cluster {
m_clusterPropertyCache.clear();
}
+
}
[07/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aee49b67
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aee49b67
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aee49b67
Branch: refs/heads/trunk
Commit: aee49b67f5daf558e0dfa2091bb97c2619e3c097
Parents: d53c9e2 f2bcbbe
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Oct 3 14:04:20 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Mon Oct 3 14:04:20 2016 -0400
----------------------------------------------------------------------
.../ui/admin-web/app/views/groups/edit.html | 4 +-
ambari-agent/conf/unix/ambari-agent.ini | 1 +
.../src/main/python/ambari_agent/ActionQueue.py | 22 +-
.../src/main/python/ambari_agent/HostCleanup.py | 6 +-
.../ambari_agent/PythonReflectiveExecutor.py | 25 +-
.../test/python/ambari_agent/TestActionQueue.py | 3 +-
.../ambari/logfeeder/LogFeederAMSClient.java | 9 +-
.../logsearch/solr/metrics/SolrAmsClient.java | 9 +-
.../timeline/AbstractTimelineMetricsSink.java | 59 +-
.../availability/MetricCollectorHATest.java | 22 +-
.../cache/HandleConnectExceptionTest.java | 12 +-
.../sink/flume/FlumeTimelineMetricsSink.java | 14 +-
.../timeline/HadoopTimelineMetricsSink.java | 27 +-
.../timeline/HadoopTimelineMetricsSinkTest.java | 12 +-
.../kafka/KafkaTimelineMetricsReporter.java | 14 +-
.../storm/StormTimelineMetricsReporter.java | 14 +-
.../sink/storm/StormTimelineMetricsSink.java | 15 +-
.../storm/StormTimelineMetricsReporter.java | 13 +-
.../sink/storm/StormTimelineMetricsSink.java | 15 +-
.../system/impl/AmbariMetricSinkImpl.java | 12 +-
.../org/apache/ambari/server/state/Alert.java | 7 +-
.../1.6.1.2.2.0/package/scripts/params.py | 5 -
.../hadoop-metrics2-accumulo.properties.j2 | 12 +-
.../default/grafana-ams-hbase-home.json | 2558 +++++
.../default/grafana-ams-hbase-misc.json | 1680 ++++
.../grafana-ams-hbase-regionservers.json | 9063 ++++++++++++++++++
.../hadoop-metrics2-hbase.properties.j2 | 14 +-
.../FLUME/1.4.0.2.0/package/scripts/flume.py | 1 +
.../FLUME/1.4.0.2.0/package/scripts/params.py | 7 -
.../templates/flume-metrics2.properties.j2 | 2 +-
.../0.96.0.2.0/package/scripts/params_linux.py | 12 -
...-metrics2-hbase.properties-GANGLIA-MASTER.j2 | 15 +-
...doop-metrics2-hbase.properties-GANGLIA-RS.j2 | 15 +-
.../hadoop-metrics2.properties.xml | 18 +-
.../0.12.0.2.0/package/scripts/params_linux.py | 5 -
.../hadoop-metrics2-hivemetastore.properties.j2 | 3 +-
.../hadoop-metrics2-hiveserver2.properties.j2 | 4 +-
.../templates/hadoop-metrics2-llapdaemon.j2 | 3 +-
.../hadoop-metrics2-llaptaskscheduler.j2 | 3 +-
.../KAFKA/0.8.1/configuration/kafka-broker.xml | 2 +-
.../KAFKA/0.8.1/package/scripts/kafka.py | 2 +-
.../KAFKA/0.8.1/package/scripts/params.py | 7 -
.../common-services/KAFKA/0.9.0/widgets.json | 18 +-
.../STORM/0.9.1/package/scripts/params_linux.py | 11 -
.../0.9.1/package/templates/config.yaml.j2 | 6 +-
.../templates/storm-metrics2.properties.j2 | 2 +-
.../2.0.6/hooks/before-START/scripts/params.py | 7 -
.../templates/hadoop-metrics2.properties.j2 | 18 +-
.../stacks/HDP/2.0.6/services/stack_advisor.py | 10 +-
.../apache/ambari/server/state/AlertTest.java | 53 +
.../service/manage_config_groups_controller.js | 2 +-
.../configs/config_recommendation_parser.js | 2 +-
ambari-web/app/utils/hosts.js | 2 +-
.../app/views/common/chart/linear_time.js | 2 +-
.../configs/service_configs_by_category_view.js | 535 +-
.../config_recommendation_parser_test.js | 4 +-
.../service_configs_by_category_view_test.js | 7 +-
57 files changed, 13991 insertions(+), 434 deletions(-)
----------------------------------------------------------------------
[05/32] ambari git commit: AMBARI-18495 - Remove Unnecessary Locks
Inside Of Cluster Business Object Implementations (jonathanhurley)
Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
index 6318545..7b119f2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
@@ -28,8 +28,6 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.persistence.RollbackException;
@@ -103,18 +101,12 @@ public class ClustersImpl implements Clusters {
private static final Logger LOG = LoggerFactory.getLogger(
ClustersImpl.class);
- private ConcurrentHashMap<String, Cluster> clusters;
- private ConcurrentHashMap<Long, Cluster> clustersById;
- private ConcurrentHashMap<String, Host> hosts;
- private ConcurrentHashMap<Long, Host> hostsById;
- private ConcurrentHashMap<String, Set<Cluster>> hostClusterMap;
- private ConcurrentHashMap<String, Set<Host>> clusterHostMap;
-
- private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
- private final Lock r = rwl.readLock();
- private final Lock w = rwl.writeLock();
-
- private volatile boolean clustersLoaded = false;
+ private final ConcurrentHashMap<String, Cluster> clusters = new ConcurrentHashMap<String, Cluster>();
+ private final ConcurrentHashMap<Long, Cluster> clustersById = new ConcurrentHashMap<Long, Cluster>();
+ private final ConcurrentHashMap<String, Host> hosts = new ConcurrentHashMap<String, Host>();
+ private final ConcurrentHashMap<Long, Host> hostsById = new ConcurrentHashMap<Long, Host>();
+ private final ConcurrentHashMap<String, Set<Cluster>> hostClusterMap = new ConcurrentHashMap<String, Set<Cluster>>();
+ private final ConcurrentHashMap<String, Set<Host>> clusterHostMap = new ConcurrentHashMap<String, Set<Host>>();
@Inject
private ClusterDAO clusterDAO;
@@ -168,33 +160,36 @@ public class ClustersImpl implements Clusters {
private AmbariEventPublisher eventPublisher;
@Inject
- public ClustersImpl() {
- clusters = new ConcurrentHashMap<String, Cluster>();
- clustersById = new ConcurrentHashMap<Long, Cluster>();
- hosts = new ConcurrentHashMap<String, Host>();
- hostsById = new ConcurrentHashMap<Long, Host>();
- hostClusterMap = new ConcurrentHashMap<String, Set<Cluster>>();
- clusterHostMap = new ConcurrentHashMap<String, Set<Host>>();
-
- LOG.info("Initializing the ClustersImpl");
- }
+ public ClustersImpl(ClusterDAO clusterDAO, ClusterFactory clusterFactory, HostDAO hostDAO,
+ HostFactory hostFactory) {
- private void checkLoaded() {
- if (!clustersLoaded) {
- w.lock();
- try {
- if (!clustersLoaded) {
- loadClustersAndHosts();
- }
- clustersLoaded = true;
- } finally {
- w.unlock();
- }
- }
+ this.clusterDAO = clusterDAO;
+ this.clusterFactory = clusterFactory;
+ this.hostDAO = hostDAO;
+ this.hostFactory = hostFactory;
}
+ /**
+ * Initializes all of the in-memory state collections that this class
+ * unfortunately uses. It's annotated with {@link Inject} as a way to define a
+ * very simple lifecycle with Guice where the constructor is instantiated
+ * (allowing injected members) followed by this method which initializes the
+ * state of the instance.
+ * <p/>
+ * Because some of these stateful initializations may actually reference this
+ * {@link Clusters} instance, we must do this after the object has been
+ * instantiated and injected.
+ */
+ @Inject
@Transactional
- void loadClustersAndHosts() {
+ private void loadClustersAndHosts() {
+ List<HostEntity> hostEntities = hostDAO.findAll();
+ for (HostEntity hostEntity : hostEntities) {
+ Host host = hostFactory.create(hostEntity, true);
+ hosts.put(hostEntity.getHostName(), host);
+ hostsById.put(hostEntity.getHostId(), host);
+ }
+
for (ClusterEntity clusterEntity : clusterDAO.findAll()) {
Cluster currentCluster = clusterFactory.create(clusterEntity);
clusters.put(clusterEntity.getClusterName(), currentCluster);
@@ -202,13 +197,11 @@ public class ClustersImpl implements Clusters {
clusterHostMap.put(currentCluster.getClusterName(), Collections.newSetFromMap(new ConcurrentHashMap<Host, Boolean>()));
}
- for (HostEntity hostEntity : hostDAO.findAll()) {
- Host host = hostFactory.create(hostEntity, true);
- hosts.put(hostEntity.getHostName(), host);
- hostsById.put(hostEntity.getHostId(), host);
+ for (HostEntity hostEntity : hostEntities) {
Set<Cluster> cSet = Collections.newSetFromMap(new ConcurrentHashMap<Cluster, Boolean>());
hostClusterMap.put(hostEntity.getHostName(), cSet);
+ Host host = hosts.get(hostEntity.getHostName());
for (ClusterEntity clusterEntity : hostEntity.getClusterEntities()) {
clusterHostMap.get(clusterEntity.getClusterName()).add(host);
cSet.add(clusters.get(clusterEntity.getClusterName()));
@@ -225,66 +218,56 @@ public class ClustersImpl implements Clusters {
@Override
public void addCluster(String clusterName, StackId stackId, SecurityType securityType)
throws AmbariException {
- checkLoaded();
-
Cluster cluster = null;
- w.lock();
- try {
- if (clusters.containsKey(clusterName)) {
- throw new DuplicateResourceException("Attempted to create a Cluster which already exists"
- + ", clusterName=" + clusterName);
- }
-
- // create an admin resource to represent this cluster
- ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
- if (resourceTypeEntity == null) {
- resourceTypeEntity = new ResourceTypeEntity();
- resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
- resourceTypeEntity.setName(ResourceType.CLUSTER.name());
- resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
- }
+ if (clusters.containsKey(clusterName)) {
+ throw new DuplicateResourceException(
+ "Attempted to create a Cluster which already exists" + ", clusterName=" + clusterName);
+ }
- ResourceEntity resourceEntity = new ResourceEntity();
- resourceEntity.setResourceType(resourceTypeEntity);
+ // create an admin resource to represent this cluster
+ ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
+ if (resourceTypeEntity == null) {
+ resourceTypeEntity = new ResourceTypeEntity();
+ resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
+ resourceTypeEntity.setName(ResourceType.CLUSTER.name());
+ resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
+ }
- StackEntity stackEntity = stackDAO.find(stackId.getStackName(),
- stackId.getStackVersion());
+ ResourceEntity resourceEntity = new ResourceEntity();
+ resourceEntity.setResourceType(resourceTypeEntity);
- // retrieve new cluster id
- // add cluster id -> cluster mapping into clustersById
- ClusterEntity clusterEntity = new ClusterEntity();
- clusterEntity.setClusterName(clusterName);
- clusterEntity.setDesiredStack(stackEntity);
- clusterEntity.setResource(resourceEntity);
- if (securityType != null) {
- clusterEntity.setSecurityType(securityType);
- }
+ StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
- try {
- clusterDAO.create(clusterEntity);
- clusterEntity = clusterDAO.merge(clusterEntity);
- } catch (RollbackException e) {
- LOG.warn("Unable to create cluster " + clusterName, e);
- throw new AmbariException("Unable to create cluster " + clusterName, e);
- }
+ // retrieve new cluster id
+ // add cluster id -> cluster mapping into clustersById
+ ClusterEntity clusterEntity = new ClusterEntity();
+ clusterEntity.setClusterName(clusterName);
+ clusterEntity.setDesiredStack(stackEntity);
+ clusterEntity.setResource(resourceEntity);
+ if (securityType != null) {
+ clusterEntity.setSecurityType(securityType);
+ }
- cluster = clusterFactory.create(clusterEntity);
- clusters.put(clusterName, cluster);
- clustersById.put(cluster.getClusterId(), cluster);
- clusterHostMap.put(clusterName, new HashSet<Host>());
- } finally {
- w.unlock();
+ try {
+ clusterDAO.create(clusterEntity);
+ } catch (RollbackException e) {
+ LOG.warn("Unable to create cluster " + clusterName, e);
+ throw new AmbariException("Unable to create cluster " + clusterName, e);
}
+ cluster = clusterFactory.create(clusterEntity);
+ clusters.put(clusterName, cluster);
+ clustersById.put(cluster.getClusterId(), cluster);
+ clusterHostMap.put(clusterName,
+ Collections.newSetFromMap(new ConcurrentHashMap<Host, Boolean>()));
+
cluster.setCurrentStackVersion(stackId);
}
@Override
public Cluster getCluster(String clusterName)
throws AmbariException {
- checkLoaded();
-
Cluster cluster = null;
if (clusterName != null) {
cluster = clusters.get(clusterName);
@@ -299,8 +282,6 @@ public class ClustersImpl implements Clusters {
@Override
public Cluster getCluster(Long clusterId)
throws AmbariException {
- checkLoaded();
-
Cluster cluster = null;
if (clusterId != null) {
cluster = clustersById.get(clusterId);
@@ -314,8 +295,6 @@ public class ClustersImpl implements Clusters {
@Override
public Cluster getClusterById(long id) throws AmbariException {
- checkLoaded();
-
Cluster cluster = clustersById.get(id);
if (null == cluster) {
throw new ClusterNotFoundException("clusterID=" + id);
@@ -327,6 +306,7 @@ public class ClustersImpl implements Clusters {
@Override
public void setCurrentStackVersion(String clusterName, StackId stackId)
throws AmbariException{
+
if(stackId == null || clusterName == null || clusterName.isEmpty()){
LOG.warn("Unable to set version for cluster " + clusterName);
throw new AmbariException("Unable to set"
@@ -334,19 +314,9 @@ public class ClustersImpl implements Clusters {
+ " for cluster " + clusterName);
}
- checkLoaded();
-
- Cluster cluster = null;
-
- r.lock();
- try {
- if (!clusters.containsKey(clusterName)) {
- throw new ClusterNotFoundException(clusterName);
- }
-
- cluster = clusters.get(clusterName);
- } finally {
- r.unlock();
+ Cluster cluster = clusters.get(clusterName);
+ if (null == cluster) {
+ throw new ClusterNotFoundException(clusterName);
}
cluster.setCurrentStackVersion(stackId);
@@ -354,15 +324,12 @@ public class ClustersImpl implements Clusters {
@Override
public List<Host> getHosts() {
- checkLoaded();
-
return new ArrayList<Host>(hosts.values());
}
@Override
public Set<Cluster> getClustersForHost(String hostname)
throws AmbariException {
- checkLoaded();
Set<Cluster> clusters = hostClusterMap.get(hostname);
if(clusters == null){
throw new HostNotFoundException(hostname);
@@ -378,19 +345,16 @@ public class ClustersImpl implements Clusters {
@Override
public Host getHost(String hostname) throws AmbariException {
- checkLoaded();
-
- if (!hosts.containsKey(hostname)) {
+ Host host = hosts.get(hostname);
+ if (null == host) {
throw new HostNotFoundException(hostname);
}
- return hosts.get(hostname);
+ return host;
}
@Override
public boolean hostExists(String hostname){
- checkLoaded();
-
return hosts.containsKey(hostname);
}
@@ -399,8 +363,6 @@ public class ClustersImpl implements Clusters {
*/
@Override
public boolean isHostMappedToCluster(String clusterName, String hostName) {
- checkLoaded();
-
Set<Cluster> clusters = hostClusterMap.get(hostName);
for (Cluster cluster : clusters) {
if (clusterName.equals(cluster.getClusterName())) {
@@ -413,8 +375,6 @@ public class ClustersImpl implements Clusters {
@Override
public Host getHostById(Long hostId) throws AmbariException {
- checkLoaded();
-
if (!hostsById.containsKey(hostId)) {
throw new HostNotFoundException("Host Id = " + hostId);
}
@@ -442,40 +402,32 @@ public class ClustersImpl implements Clusters {
*/
@Override
public void addHost(String hostname) throws AmbariException {
- checkLoaded();
-
if (hosts.containsKey(hostname)) {
throw new AmbariException(MessageFormat.format("Duplicate entry for Host {0}", hostname));
}
- w.lock();
+ HostEntity hostEntity = new HostEntity();
+ hostEntity.setHostName(hostname);
+ hostEntity.setClusterEntities(new ArrayList<ClusterEntity>());
- try {
- HostEntity hostEntity = new HostEntity();
- hostEntity.setHostName(hostname);
- hostEntity.setClusterEntities(new ArrayList<ClusterEntity>());
-
- //not stored to DB
- Host host = hostFactory.create(hostEntity, false);
- host.setAgentVersion(new AgentVersion(""));
- List<DiskInfo> emptyDiskList = new ArrayList<DiskInfo>();
- host.setDisksInfo(emptyDiskList);
- host.setHealthStatus(new HostHealthStatus(HealthStatus.UNKNOWN, ""));
- host.setHostAttributes(new HashMap<String, String>());
- host.setState(HostState.INIT);
+ // not stored to DB
+ Host host = hostFactory.create(hostEntity, false);
+ host.setAgentVersion(new AgentVersion(""));
+ List<DiskInfo> emptyDiskList = new ArrayList<DiskInfo>();
+ host.setDisksInfo(emptyDiskList);
+ host.setHealthStatus(new HostHealthStatus(HealthStatus.UNKNOWN, ""));
+ host.setHostAttributes(new HashMap<String, String>());
+ host.setState(HostState.INIT);
- // the hosts by ID map is updated separately since the host has not yet
- // been persisted yet - the below event is what causes the persist
- hosts.put(hostname, host);
+ // the hosts by ID map is updated separately since the host has not yet
+ // been persisted yet - the below event is what causes the persist
+ hosts.put(hostname, host);
- hostClusterMap.put(hostname, Collections.newSetFromMap(new ConcurrentHashMap<Cluster, Boolean>()));
+ hostClusterMap.put(hostname,
+ Collections.newSetFromMap(new ConcurrentHashMap<Cluster, Boolean>()));
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding a host to Clusters"
- + ", hostname=" + hostname);
- }
- } finally {
- w.unlock();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Adding a host to Clusters" + ", hostname=" + hostname);
}
// publish the event
@@ -494,42 +446,37 @@ public class ClustersImpl implements Clusters {
public void updateHostWithClusterAndAttributes(
Map<String, Set<String>> hostClusters,
Map<String, Map<String, String>> hostAttributes) throws AmbariException {
- checkLoaded();
- w.lock();
- try {
- if (hostClusters != null) {
- Map<String, Host> hostMap = getHostsMap(hostClusters.keySet());
- Set<String> clusterNames = new HashSet<String>();
- for (Set<String> cSet : hostClusters.values()) {
- clusterNames.addAll(cSet);
- }
+ if (null == hostClusters || hostClusters.isEmpty()) {
+ return;
+ }
- for (String hostname : hostClusters.keySet()) {
- Host host = hostMap.get(hostname);
- Map<String, String> attributes = hostAttributes.get(hostname);
- if (attributes != null && !attributes.isEmpty()){
- host.setHostAttributes(attributes);
- }
+ Map<String, Host> hostMap = getHostsMap(hostClusters.keySet());
+ Set<String> clusterNames = new HashSet<String>();
+ for (Set<String> cSet : hostClusters.values()) {
+ clusterNames.addAll(cSet);
+ }
- host.refresh();
+ for (String hostname : hostClusters.keySet()) {
+ Host host = hostMap.get(hostname);
+ Map<String, String> attributes = hostAttributes.get(hostname);
+ if (attributes != null && !attributes.isEmpty()) {
+ host.setHostAttributes(attributes);
+ }
- Set<String> hostClusterNames = hostClusters.get(hostname);
- for (String clusterName : hostClusterNames) {
- if (clusterName != null && !clusterName.isEmpty()) {
- mapHostToCluster(hostname, clusterName);
- }
- }
+ host.refresh();
+
+ Set<String> hostClusterNames = hostClusters.get(hostname);
+ for (String clusterName : hostClusterNames) {
+ if (clusterName != null && !clusterName.isEmpty()) {
+ mapHostToCluster(hostname, clusterName);
}
}
- } finally {
- w.unlock();
}
}
private Map<String, Host> getHostsMap(Collection<String> hostSet) throws
HostNotFoundException {
- checkLoaded();
Map<String, Host> hostMap = new HashMap<String, Host>();
Host host = null;
@@ -557,15 +504,9 @@ public class ClustersImpl implements Clusters {
*/
@Override
public void mapHostsToCluster(Set<String> hostnames, String clusterName) throws AmbariException {
- checkLoaded();
- w.lock();
- try {
- ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStateCurrent(clusterName);
- for (String hostname : hostnames) {
- mapHostToCluster(hostname, clusterName, clusterVersionEntity);
- }
- } finally {
- w.unlock();
+ ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStateCurrent(clusterName);
+ for (String hostname : hostnames) {
+ mapHostToCluster(hostname, clusterName, clusterVersionEntity);
}
}
@@ -582,22 +523,15 @@ public class ClustersImpl implements Clusters {
Host host = null;
Cluster cluster = null;
- checkLoaded();
+ host = getHost(hostname);
+ cluster = getCluster(clusterName);
- r.lock();
- try {
- host = getHost(hostname);
- cluster = getCluster(clusterName);
-
- // check to ensure there are no duplicates
- for (Cluster c : hostClusterMap.get(hostname)) {
- if (c.getClusterName().equals(clusterName)) {
- throw new DuplicateResourceException("Attempted to create a host which already exists: clusterName=" +
- clusterName + ", hostName=" + hostname);
- }
+ // check to ensure there are no duplicates
+ for (Cluster c : hostClusterMap.get(hostname)) {
+ if (c.getClusterName().equals(clusterName)) {
+ throw new DuplicateResourceException("Attempted to create a host which already exists: clusterName=" +
+ clusterName + ", hostName=" + hostname);
}
- } finally {
- r.unlock();
}
if (!isOsSupportedByClusterStack(cluster, host)) {
@@ -615,14 +549,9 @@ public class ClustersImpl implements Clusters {
clusterId);
}
- w.lock();
- try {
- mapHostClusterEntities(hostname, clusterId);
- hostClusterMap.get(hostname).add(cluster);
- clusterHostMap.get(clusterName).add(host);
- } finally {
- w.unlock();
- }
+ mapHostClusterEntities(hostname, clusterId);
+ hostClusterMap.get(hostname).add(cluster);
+ clusterHostMap.get(clusterName).add(host);
cluster.refresh();
host.refresh();
@@ -638,8 +567,6 @@ public class ClustersImpl implements Clusters {
@Override
public void mapHostToCluster(String hostname, String clusterName)
throws AmbariException {
- checkLoaded();
-
ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStateCurrent(clusterName);
mapHostToCluster(hostname, clusterName, clusterVersionEntity);
}
@@ -662,169 +589,121 @@ public class ClustersImpl implements Clusters {
@Override
public Map<String, Cluster> getClusters() {
- checkLoaded();
- r.lock();
- try {
- return Collections.unmodifiableMap(clusters);
- } finally {
- r.unlock();
- }
+ return Collections.unmodifiableMap(clusters);
}
@Override
public void updateClusterName(String oldName, String newName) {
- w.lock();
- try {
- clusters.put(newName, clusters.remove(oldName));
- clusterHostMap.put(newName, clusterHostMap.remove(oldName));
- } finally {
- w.unlock();
- }
+ clusters.put(newName, clusters.remove(oldName));
+ clusterHostMap.put(newName, clusterHostMap.remove(oldName));
}
@Override
public void debugDump(StringBuilder sb) {
- r.lock();
- try {
- sb.append("Clusters=[ ");
- boolean first = true;
- for (Cluster c : clusters.values()) {
- if (!first) {
- sb.append(" , ");
- }
- first = false;
- sb.append("\n ");
- c.debugDump(sb);
- sb.append(" ");
+ sb.append("Clusters=[ ");
+ boolean first = true;
+ for (Cluster c : clusters.values()) {
+ if (!first) {
+ sb.append(" , ");
}
- sb.append(" ]");
- } finally {
- r.unlock();
+ first = false;
+ sb.append("\n ");
+ c.debugDump(sb);
+ sb.append(" ");
}
+ sb.append(" ]");
}
@Override
public Map<String, Host> getHostsForCluster(String clusterName)
throws AmbariException {
- checkLoaded();
- r.lock();
-
- try {
- Map<String, Host> hosts = new HashMap<String, Host>();
-
- for (Host h : clusterHostMap.get(clusterName)) {
- hosts.put(h.getHostName(), h);
- }
-
- return hosts;
- } finally {
- r.unlock();
+ Map<String, Host> hosts = new HashMap<String, Host>();
+ for (Host h : clusterHostMap.get(clusterName)) {
+ hosts.put(h.getHostName(), h);
}
+
+ return hosts;
}
@Override
public Map<Long, Host> getHostIdsForCluster(String clusterName)
throws AmbariException {
+ Map<Long, Host> hosts = new HashMap<Long, Host>();
- checkLoaded();
- r.lock();
-
- try {
- Map<Long, Host> hosts = new HashMap<Long, Host>();
-
- for (Host h : clusterHostMap.get(clusterName)) {
- HostEntity hostEntity = hostDAO.findByName(h.getHostName());
- hosts.put(hostEntity.getHostId(), h);
- }
-
- return hosts;
- } finally {
- r.unlock();
+ for (Host h : clusterHostMap.get(clusterName)) {
+ HostEntity hostEntity = hostDAO.findByName(h.getHostName());
+ hosts.put(hostEntity.getHostId(), h);
}
+
+ return hosts;
}
@Override
public void deleteCluster(String clusterName)
throws AmbariException {
- checkLoaded();
- w.lock();
- try {
- Cluster cluster = getCluster(clusterName);
- if (!cluster.canBeRemoved()) {
- throw new AmbariException("Could not delete cluster"
- + ", clusterName=" + clusterName);
- }
- LOG.info("Deleting cluster " + cluster.getClusterName());
- cluster.delete();
+ Cluster cluster = getCluster(clusterName);
+ if (!cluster.canBeRemoved()) {
+ throw new AmbariException("Could not delete cluster" + ", clusterName=" + clusterName);
+ }
- //clear maps
- for (Set<Cluster> clusterSet : hostClusterMap.values()) {
- clusterSet.remove(cluster);
- }
- clusterHostMap.remove(cluster.getClusterName());
+ LOG.info("Deleting cluster " + cluster.getClusterName());
+ cluster.delete();
- Collection<ClusterVersionEntity> clusterVersions = cluster.getAllClusterVersions();
- for (ClusterVersionEntity clusterVersion : clusterVersions) {
- clusterVersionDAO.remove(clusterVersion);
- }
+ // clear maps
+ for (Set<Cluster> clusterSet : hostClusterMap.values()) {
+ clusterSet.remove(cluster);
+ }
+ clusterHostMap.remove(cluster.getClusterName());
- clusters.remove(clusterName);
- } finally {
- w.unlock();
+ Collection<ClusterVersionEntity> clusterVersions = cluster.getAllClusterVersions();
+ for (ClusterVersionEntity clusterVersion : clusterVersions) {
+ clusterVersionDAO.remove(clusterVersion);
}
+
+ clusters.remove(clusterName);
}
@Override
public void unmapHostFromCluster(String hostname, String clusterName) throws AmbariException {
final Cluster cluster = getCluster(clusterName);
- unmapHostFromClusters(hostname, Sets.newHashSet(cluster));
+ Host host = getHost(hostname);
+
+ unmapHostFromClusters(host, Sets.newHashSet(cluster));
+
+ cluster.refresh();
+ host.refresh();
}
@Transactional
- void unmapHostFromClusters(String hostname, Set<Cluster> clusters) throws AmbariException {
- Host host = null;
+ void unmapHostFromClusters(Host host, Set<Cluster> clusters) throws AmbariException {
HostEntity hostEntity = null;
- checkLoaded();
if (clusters.isEmpty()) {
return;
}
- r.lock();
- try {
- host = getHost(hostname);
- hostEntity = hostDAO.findByName(hostname);
- } finally {
- r.unlock();
- }
-
- w.lock();
- try {
- for (Cluster cluster : clusters) {
- long clusterId = cluster.getClusterId();
- if (LOG.isDebugEnabled()) {
- LOG.debug("Unmapping host {} from cluster {} (id={})", hostname,
- cluster.getClusterName(), clusterId);
- }
-
- unmapHostClusterEntities(hostname, cluster.getClusterId());
-
- hostClusterMap.get(hostname).remove(cluster);
- clusterHostMap.get(cluster.getClusterName()).remove(host);
+ String hostname = host.getHostName();
+ hostEntity = hostDAO.findByName(hostname);
- host.refresh();
- cluster.refresh();
+ for (Cluster cluster : clusters) {
+ long clusterId = cluster.getClusterId();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Unmapping host {} from cluster {} (id={})", hostname, cluster.getClusterName(),
+ clusterId);
}
- deleteConfigGroupHostMapping(hostEntity.getHostId());
+ unmapHostClusterEntities(hostname, cluster.getClusterId());
- // Remove mapping of principals to the unmapped host
- kerberosPrincipalHostDAO.removeByHost(hostEntity.getHostId());
- } finally {
- w.unlock();
+ hostClusterMap.get(hostname).remove(cluster);
+ clusterHostMap.get(cluster.getClusterName()).remove(host);
}
+
+ deleteConfigGroupHostMapping(hostEntity.getHostId());
+
+ // Remove mapping of principals to the unmapped host
+ kerberosPrincipalHostDAO.removeByHost(hostEntity.getHostId());
}
@Transactional
@@ -890,88 +769,81 @@ public class ClustersImpl implements Clusters {
*/
@Transactional
void deleteHostEntityRelationships(String hostname) throws AmbariException {
- checkLoaded();
-
if (!hosts.containsKey(hostname)) {
throw new HostNotFoundException("Could not find host " + hostname);
}
- w.lock();
-
- try {
- HostEntity entity = hostDAO.findByName(hostname);
-
- if (entity == null) {
- return;
- }
- // Remove from all clusters in the cluster_host_mapping table.
- // This will also remove from kerberos_principal_hosts, hostconfigmapping, and configgrouphostmapping
- Set<Cluster> clusters = hostClusterMap.get(hostname);
- Set<Long> clusterIds = Sets.newHashSet();
- for (Cluster cluster: clusters) {
- clusterIds.add(cluster.getClusterId());
- }
-
+ HostEntity entity = hostDAO.findByName(hostname);
+ if (entity == null) {
+ return;
+ }
+ // Remove from all clusters in the cluster_host_mapping table.
+ // This will also remove from kerberos_principal_hosts, hostconfigmapping,
+ // and configgrouphostmapping
+ Set<Cluster> clusters = hostClusterMap.get(hostname);
+ Set<Long> clusterIds = Sets.newHashSet();
+ for (Cluster cluster : clusters) {
+ clusterIds.add(cluster.getClusterId());
+ }
- unmapHostFromClusters(hostname, clusters);
- hostDAO.refresh(entity);
+ Host host = hosts.get(hostname);
+ unmapHostFromClusters(host, clusters);
+ hostDAO.refresh(entity);
- hostVersionDAO.removeByHostName(hostname);
+ hostVersionDAO.removeByHostName(hostname);
- // Remove blueprint tasks before hostRoleCommands
- // TopologyLogicalTask owns the OneToOne relationship but Cascade is on HostRoleCommandEntity
- if (entity.getHostRoleCommandEntities() != null) {
- for (HostRoleCommandEntity hrcEntity : entity.getHostRoleCommandEntities()) {
- TopologyLogicalTaskEntity topologyLogicalTaskEnity = hrcEntity.getTopologyLogicalTaskEntity();
- if (topologyLogicalTaskEnity != null) {
- topologyLogicalTaskDAO.remove(topologyLogicalTaskEnity);
- hrcEntity.setTopologyLogicalTaskEntity(null);
- }
+ // Remove blueprint tasks before hostRoleCommands
+ // TopologyLogicalTask owns the OneToOne relationship but Cascade is on
+ // HostRoleCommandEntity
+ if (entity.getHostRoleCommandEntities() != null) {
+ for (HostRoleCommandEntity hrcEntity : entity.getHostRoleCommandEntities()) {
+ TopologyLogicalTaskEntity topologyLogicalTaskEnity = hrcEntity.getTopologyLogicalTaskEntity();
+ if (topologyLogicalTaskEnity != null) {
+ topologyLogicalTaskDAO.remove(topologyLogicalTaskEnity);
+ hrcEntity.setTopologyLogicalTaskEntity(null);
}
}
- for (Long clusterId: clusterIds) {
- for (TopologyRequestEntity topologyRequestEntity: topologyRequestDAO.findByClusterId(clusterId)) {
- TopologyLogicalRequestEntity topologyLogicalRequestEntity = topologyRequestEntity.getTopologyLogicalRequestEntity();
-
- for (TopologyHostRequestEntity topologyHostRequestEntity: topologyLogicalRequestEntity.getTopologyHostRequestEntities()) {
- if (hostname.equals(topologyHostRequestEntity.getHostName())) {
- topologyHostRequestDAO.remove(topologyHostRequestEntity);
- }
+ }
+
+ for (Long clusterId : clusterIds) {
+ for (TopologyRequestEntity topologyRequestEntity : topologyRequestDAO.findByClusterId(
+ clusterId)) {
+ TopologyLogicalRequestEntity topologyLogicalRequestEntity = topologyRequestEntity.getTopologyLogicalRequestEntity();
+
+ for (TopologyHostRequestEntity topologyHostRequestEntity : topologyLogicalRequestEntity.getTopologyHostRequestEntities()) {
+ if (hostname.equals(topologyHostRequestEntity.getHostName())) {
+ topologyHostRequestDAO.remove(topologyHostRequestEntity);
}
}
}
+ }
- entity.setHostRoleCommandEntities(null);
- hostRoleCommandDAO.removeByHostId(entity.getHostId());
+ entity.setHostRoleCommandEntities(null);
+ hostRoleCommandDAO.removeByHostId(entity.getHostId());
- entity.setHostStateEntity(null);
- hostStateDAO.removeByHostId(entity.getHostId());
- hostConfigMappingDAO.removeByHostId(entity.getHostId());
- serviceConfigDAO.removeHostFromServiceConfigs(entity.getHostId());
- requestOperationLevelDAO.removeByHostId(entity.getHostId());
- topologyHostInfoDAO.removeByHost(entity);
+ entity.setHostStateEntity(null);
+ hostStateDAO.removeByHostId(entity.getHostId());
+ hostConfigMappingDAO.removeByHostId(entity.getHostId());
+ serviceConfigDAO.removeHostFromServiceConfigs(entity.getHostId());
+ requestOperationLevelDAO.removeByHostId(entity.getHostId());
+ topologyHostInfoDAO.removeByHost(entity);
- // Remove from dictionaries
- hosts.remove(hostname);
- hostsById.remove(entity.getHostId());
+ // Remove from dictionaries
+ hosts.remove(hostname);
+ hostsById.remove(entity.getHostId());
- hostDAO.remove(entity);
+ hostDAO.remove(entity);
- // Note, if the host is still heartbeating, then new records will be re-inserted
- // into the hosts and hoststate tables
- } catch (Exception e) {
- throw new AmbariException("Could not remove host", e);
- } finally {
- w.unlock();
- }
+ // Note, if the host is still heartbeating, then new records will be
+ // re-inserted
+ // into the hosts and hoststate tables
}
@Override
public boolean checkPermission(String clusterName, boolean readOnly) {
-
Cluster cluster = findCluster(clusterName);
return (cluster == null && readOnly) || checkPermission(cluster, readOnly);
@@ -999,19 +871,14 @@ public class ClustersImpl implements Clusters {
*/
@Override
public int getClusterSize(String clusterName) {
- checkLoaded();
- r.lock();
-
int hostCount = 0;
- if (clusterHostMap.containsKey(clusterName) && clusterHostMap.get(clusterName) != null) {
+ Set<Host> hosts = clusterHostMap.get(clusterName);
+ if (null != hosts) {
hostCount = clusterHostMap.get(clusterName).size();
}
- r.unlock();
-
return hostCount;
-
}
// ----- helper methods ---------------------------------------------------
@@ -1064,4 +931,16 @@ public class ClustersImpl implements Clusters {
// TODO : should we log this?
return false;
}
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void invalidate(Cluster cluster) {
+ ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
+ Cluster currentCluster = clusterFactory.create(clusterEntity);
+ clusters.put(clusterEntity.getClusterName(), currentCluster);
+ clustersById.put(currentCluster.getClusterId(), currentCluster);
+ clusterHostMap.put(currentCluster.getClusterName(), Collections.newSetFromMap(new ConcurrentHashMap<Host, Boolean>()));
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java
index 877e84d..17f1447 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java
@@ -17,23 +17,25 @@
*/
package org.apache.ambari.server.utils;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.Callable;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
import org.eclipse.persistence.exceptions.DatabaseException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.Callable;
-
/**
* Provides utility methods to support operations retry
* TODO injection as Guice singleon, static for now to avoid major modifications
*/
public class RetryHelper {
protected final static Logger LOG = LoggerFactory.getLogger(RetryHelper.class);
+ private static Clusters s_clusters;
private static ThreadLocal<Set<Cluster>> affectedClusters = new ThreadLocal<Set<Cluster>>(){
@Override
@@ -44,7 +46,8 @@ public class RetryHelper {
private static int operationsRetryAttempts = 0;
- public static void init(int operationsRetryAttempts) {
+ public static void init(Clusters clusters, int operationsRetryAttempts) {
+ s_clusters = clusters;
RetryHelper.operationsRetryAttempts = operationsRetryAttempts;
}
@@ -82,7 +85,7 @@ public class RetryHelper {
public static void invalidateAffectedClusters() {
for (Cluster cluster : affectedClusters.get()) {
- cluster.invalidateData();
+ s_clusters.invalidate(cluster);
}
}
@@ -90,7 +93,6 @@ public class RetryHelper {
RetryHelper.clearAffectedClusters();
int retryAttempts = RetryHelper.getOperationsRetryAttempts();
do {
-
try {
return command.call();
} catch (Exception e) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
index dd741e9..37a6ae0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
@@ -18,10 +18,12 @@
package org.apache.ambari.server.agent;
+import static org.easymock.EasyMock.createNiceMock;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+import javax.persistence.EntityManager;
import javax.ws.rs.core.MediaType;
import org.apache.ambari.server.RandomPortJerseyTest;
@@ -35,6 +37,7 @@ import org.apache.ambari.server.agent.rest.AgentResource;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.HostDAO;
import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
import org.apache.ambari.server.security.SecurityHelper;
import org.apache.ambari.server.security.SecurityHelperImpl;
@@ -55,7 +58,6 @@ import org.apache.ambari.server.state.ServiceFactory;
import org.apache.ambari.server.state.ServiceImpl;
import org.apache.ambari.server.state.cluster.ClusterFactory;
import org.apache.ambari.server.state.cluster.ClusterImpl;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
import org.apache.ambari.server.state.configgroup.ConfigGroup;
import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
import org.apache.ambari.server.state.configgroup.ConfigGroupImpl;
@@ -80,7 +82,6 @@ import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.assistedinject.FactoryModuleBuilder;
-import com.google.inject.persist.jpa.AmbariJpaPersistModule;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
@@ -296,7 +297,6 @@ public class AgentResourceTest extends RandomPortJerseyTest {
// The test will fail anyway
}
requestStaticInjection(AgentResource.class);
- bind(Clusters.class).to(ClustersImpl.class);
os_family = mock(OsFamily.class);
actionManager = mock(ActionManager.class);
ambariMetaInfo = mock(AmbariMetaInfo.class);
@@ -311,10 +311,12 @@ public class AgentResourceTest extends RandomPortJerseyTest {
bind(AmbariMetaInfo.class).toInstance(ambariMetaInfo);
bind(DBAccessor.class).toInstance(mock(DBAccessor.class));
bind(HostRoleCommandDAO.class).toInstance(mock(HostRoleCommandDAO.class));
+ bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
+ bind(HostDAO.class).toInstance(createNiceMock(HostDAO.class));
+ bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
}
private void installDependencies() {
- install(new AmbariJpaPersistModule("ambari-javadb"));
install(new FactoryModuleBuilder().implement(
Cluster.class, ClusterImpl.class).build(ClusterFactory.class));
install(new FactoryModuleBuilder().implement(
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
index 299002b..6e3f2e0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
@@ -59,7 +59,6 @@ import org.apache.ambari.server.actionmanager.StageFactory;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.audit.AuditLogger;
import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.dao.HostDAO;
@@ -594,7 +593,7 @@ public class HeartbeatProcessorTest {
@Test
@SuppressWarnings("unchecked")
public void testCommandReportOnHeartbeatUpdatedState()
- throws AmbariException, InvalidStateTransitionException {
+ throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -714,7 +713,7 @@ public class HeartbeatProcessorTest {
@Test
@SuppressWarnings("unchecked")
- public void testUpgradeSpecificHandling() throws AmbariException, InvalidStateTransitionException {
+ public void testUpgradeSpecificHandling() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -891,7 +890,7 @@ public class HeartbeatProcessorTest {
@Test
@SuppressWarnings("unchecked")
- public void testComponentUpgradeCompleteReport() throws AmbariException, InvalidStateTransitionException {
+ public void testComponentUpgradeCompleteReport() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -976,7 +975,7 @@ public class HeartbeatProcessorTest {
@Test
@SuppressWarnings("unchecked")
- public void testComponentUpgradeFailReport() throws AmbariException, InvalidStateTransitionException {
+ public void testComponentUpgradeFailReport() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -1097,7 +1096,7 @@ public class HeartbeatProcessorTest {
@Test
@SuppressWarnings("unchecked")
- public void testComponentUpgradeInProgressReport() throws AmbariException, InvalidStateTransitionException {
+ public void testComponentUpgradeInProgressReport() throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
index 6fc6892..e813e66 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
@@ -24,6 +24,7 @@ import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOs;
import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyStackId;
import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HBASE;
+import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@@ -60,6 +61,7 @@ import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.RepositoryVersionState;
import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.cluster.ClustersImpl;
import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
@@ -135,7 +137,7 @@ public class HeartbeatTestHelper {
}
public Cluster getDummyCluster()
- throws AmbariException {
+ throws Exception {
Map<String, String> configProperties = new HashMap<String, String>() {{
put(RecoveryConfigHelper.RECOVERY_ENABLED_KEY, "true");
put(RecoveryConfigHelper.RECOVERY_TYPE_KEY, "AUTO_START");
@@ -154,7 +156,7 @@ public class HeartbeatTestHelper {
public Cluster getDummyCluster(String clusterName, String desiredStackId,
Map<String, String> configProperties, Set<String> hostNames)
- throws AmbariException {
+ throws Exception {
StackEntity stackEntity = stackDAO.find(HDP_22_STACK.getStackName(), HDP_22_STACK.getStackVersion());
org.junit.Assert.assertNotNull(stackEntity);
@@ -177,6 +179,14 @@ public class HeartbeatTestHelper {
StackId stackId = new StackId(desiredStackId);
+ // because this test method goes around the Clusters business object, we
+ // will forcefully refresh the internal state so that any tests which
+ // incorrectly use Clusters after calling this won't be affected
+ Clusters clusters = injector.getInstance(Clusters.class);
+ Method method = ClustersImpl.class.getDeclaredMethod("loadClustersAndHosts");
+ method.setAccessible(true);
+ method.invoke(clusters);
+
Cluster cluster = clusters.getCluster(clusterName);
cluster.setDesiredStackVersion(stackId);
@@ -209,6 +219,7 @@ public class HeartbeatTestHelper {
Assert.assertNotNull(hostEntity);
hostEntities.add(hostEntity);
}
+
clusterEntity.setHostEntities(hostEntities);
clusters.mapHostsToCluster(hostNames, clusterName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index aa7ef20..0f48cf6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@ -160,7 +160,7 @@ public class TestHeartbeatHandler {
}
@After
- public void teardown() throws AmbariException {
+ public void teardown() throws Exception {
injector.getInstance(PersistService.class).stop();
EasyMock.reset(auditLogger);
}
@@ -345,7 +345,7 @@ public class TestHeartbeatHandler {
@Test
- public void testRegistration() throws AmbariException,
+ public void testRegistration() throws Exception,
InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
replay(am);
@@ -376,7 +376,7 @@ public class TestHeartbeatHandler {
}
@Test
- public void testRegistrationRecoveryConfig() throws AmbariException,
+ public void testRegistrationRecoveryConfig() throws Exception,
InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
replay(am);
@@ -441,7 +441,7 @@ public class TestHeartbeatHandler {
//
@Test
public void testRegistrationRecoveryConfigMaintenanceMode()
- throws AmbariException, InvalidStateTransitionException {
+ throws Exception, InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
replay(am);
Clusters fsm = clusters;
@@ -495,7 +495,7 @@ public class TestHeartbeatHandler {
}
@Test
- public void testRegistrationAgentConfig() throws AmbariException,
+ public void testRegistrationAgentConfig() throws Exception,
InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
replay(am);
@@ -527,7 +527,7 @@ public class TestHeartbeatHandler {
}
@Test
- public void testRegistrationWithBadVersion() throws AmbariException,
+ public void testRegistrationWithBadVersion() throws Exception,
InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
@@ -570,7 +570,7 @@ public class TestHeartbeatHandler {
}
@Test
- public void testRegistrationPublicHostname() throws AmbariException, InvalidStateTransitionException {
+ public void testRegistrationPublicHostname() throws Exception, InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
replay(am);
Clusters fsm = clusters;
@@ -602,7 +602,7 @@ public class TestHeartbeatHandler {
@Test
- public void testInvalidOSRegistration() throws AmbariException,
+ public void testInvalidOSRegistration() throws Exception,
InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
replay(am);
@@ -630,7 +630,7 @@ public class TestHeartbeatHandler {
}
@Test
- public void testIncompatibleAgentRegistration() throws AmbariException,
+ public void testIncompatibleAgentRegistration() throws Exception,
InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
@@ -660,7 +660,7 @@ public class TestHeartbeatHandler {
@Test
public void testRegisterNewNode()
- throws AmbariException, InvalidStateTransitionException {
+ throws Exception, InvalidStateTransitionException {
ActionManager am = actionManagerTestHelper.getMockActionManager();
replay(am);
Clusters fsm = clusters;
@@ -745,7 +745,7 @@ public class TestHeartbeatHandler {
}
@Test
- public void testStateCommandsAtRegistration() throws AmbariException, InvalidStateTransitionException {
+ public void testStateCommandsAtRegistration() throws Exception, InvalidStateTransitionException {
List<StatusCommand> dummyCmds = new ArrayList<StatusCommand>();
StatusCommand statusCmd1 = new StatusCommand();
statusCmd1.setClusterName(DummyCluster);
@@ -781,7 +781,7 @@ public class TestHeartbeatHandler {
@Test
@SuppressWarnings("unchecked")
- public void testTaskInProgressHandling() throws AmbariException, InvalidStateTransitionException {
+ public void testTaskInProgressHandling() throws Exception, InvalidStateTransitionException {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -839,7 +839,7 @@ public class TestHeartbeatHandler {
@Test
@SuppressWarnings("unchecked")
- public void testOPFailedEventForAbortedTask() throws AmbariException, InvalidStateTransitionException {
+ public void testOPFailedEventForAbortedTask() throws Exception, InvalidStateTransitionException {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -1281,7 +1281,7 @@ public class TestHeartbeatHandler {
@Test
@SuppressWarnings("unchecked")
- public void testIgnoreCustomActionReport() throws AmbariException, InvalidStateTransitionException {
+ public void testIgnoreCustomActionReport() throws Exception, InvalidStateTransitionException {
CommandReport cr1 = new CommandReport();
cr1.setActionId(StageUtils.getActionId(requestId, stageId));
cr1.setTaskId(1);
@@ -1343,7 +1343,7 @@ public class TestHeartbeatHandler {
}
@Test
- public void testComponents() throws AmbariException,
+ public void testComponents() throws Exception,
InvalidStateTransitionException {
ComponentsResponse expected = new ComponentsResponse();
StackId dummyStackId = new StackId(DummyStackId);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java
index c36f5fe..7e564a1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java
@@ -18,20 +18,27 @@
package org.apache.ambari.server.api.services;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
-import org.apache.ambari.server.api.services.serializers.ResultSerializer;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
+import static org.junit.Assert.assertEquals;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
-import static org.junit.Assert.assertEquals;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.ambari.server.api.resources.ResourceInstance;
+import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
+import org.apache.ambari.server.api.services.serializers.ResultSerializer;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.HostDAO;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.cluster.ClusterFactory;
+import org.apache.ambari.server.state.cluster.ClustersImpl;
+import org.apache.ambari.server.state.host.HostFactory;
+import org.easymock.EasyMock;
/**
* Unit tests for ClusterService.
@@ -42,7 +49,17 @@ public class ClusterServiceTest extends BaseServiceTest {
@Override
public List<ServiceTestInvocation> getTestInvocations() throws Exception {
List<ServiceTestInvocation> listInvocations = new ArrayList<ServiceTestInvocation>();
- Clusters clusters = new TestClusters();
+
+ ClusterDAO clusterDAO = EasyMock.createNiceMock(ClusterDAO.class);
+ HostDAO hostDAO = EasyMock.createNiceMock(HostDAO.class);
+
+ EasyMock.expect(clusterDAO.findAll()).andReturn(new ArrayList<ClusterEntity>()).atLeastOnce();
+ EasyMock.expect(hostDAO.findAll()).andReturn(new ArrayList<HostEntity>()).atLeastOnce();
+
+ EasyMock.replay(clusterDAO, hostDAO);
+
+ Clusters clusters = new TestClusters(clusterDAO, EasyMock.createNiceMock(ClusterFactory.class),
+ hostDAO, EasyMock.createNiceMock(HostFactory.class));
ClusterService clusterService;
Method m;
@@ -161,6 +178,12 @@ public class ClusterServiceTest extends BaseServiceTest {
}
private class TestClusters extends ClustersImpl {
+ public TestClusters(ClusterDAO clusterDAO, ClusterFactory clusterFactory, HostDAO hostDAO,
+ HostFactory hostFactory) {
+
+ super(clusterDAO, clusterFactory, hostDAO, hostFactory);
+ }
+
@Override
public boolean checkPermission(String clusterName, boolean readOnly) {
return true;
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
index 93d261b..50f5abe 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
@@ -18,12 +18,20 @@
package org.apache.ambari.server.configuration;
-import com.google.common.eventbus.EventBus;
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-import org.apache.ambari.server.AmbariException;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DATANODE;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostname1;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HDFS;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.NAMENODE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
import org.apache.ambari.server.agent.HeartbeatTestHelper;
import org.apache.ambari.server.agent.RecoveryConfig;
import org.apache.ambari.server.agent.RecoveryConfigHelper;
@@ -40,19 +48,11 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DATANODE;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostname1;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HDFS;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.NAMENODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import com.google.common.eventbus.EventBus;
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
/**
* Test RecoveryConfigHelper class
@@ -86,7 +86,7 @@ public class RecoveryConfigHelperTest {
}
@After
- public void teardown() throws AmbariException {
+ public void teardown() throws Exception {
injector.getInstance(PersistService.class).stop();
}
@@ -95,7 +95,7 @@ public class RecoveryConfigHelperTest {
*/
@Test
public void testRecoveryConfigDefaultValues()
- throws AmbariException {
+ throws Exception {
RecoveryConfig recoveryConfig = recoveryConfigHelper.getDefaultRecoveryConfig();
assertEquals(recoveryConfig.getMaxLifetimeCount(), RecoveryConfigHelper.RECOVERY_LIFETIME_MAX_COUNT_DEFAULT);
assertEquals(recoveryConfig.getMaxCount(), RecoveryConfigHelper.RECOVERY_MAX_COUNT_DEFAULT);
@@ -107,11 +107,12 @@ public class RecoveryConfigHelperTest {
/**
* Test cluster-env properties from a dummy cluster
- * @throws AmbariException
+ *
+ * @throws Exception
*/
@Test
public void testRecoveryConfigValues()
- throws AmbariException {
+ throws Exception {
String hostname = "hostname1";
Cluster cluster = getDummyCluster(hostname);
RecoveryConfig recoveryConfig = recoveryConfigHelper.getRecoveryConfig(cluster.getClusterName(), hostname);
@@ -124,13 +125,14 @@ public class RecoveryConfigHelperTest {
}
/**
- * Install a component with auto start enabled. Verify that the old config was invalidated.
+ * Install a component with auto start enabled. Verify that the old config was
+ * invalidated.
*
- * @throws AmbariException
+ * @throws Exception
*/
@Test
public void testServiceComponentInstalled()
- throws AmbariException {
+ throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -161,12 +163,12 @@ public class RecoveryConfigHelperTest {
/**
* Uninstall a component and verify that the config is stale.
- *
- * @throws AmbariException
+ *
+ * @throws Exception
*/
@Test
public void testServiceComponentUninstalled()
- throws AmbariException {
+ throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -200,11 +202,11 @@ public class RecoveryConfigHelperTest {
/**
* Disable cluster level auto start and verify that the config is stale.
*
- * @throws AmbariException
+ * @throws Exception
*/
@Test
public void testClusterEnvConfigChanged()
- throws AmbariException {
+ throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -238,13 +240,14 @@ public class RecoveryConfigHelperTest {
}
/**
- * Change the maintenance mode of a service component host and verify that config is stale.
+ * Change the maintenance mode of a service component host and verify that
+ * config is stale.
*
- * @throws AmbariException
+ * @throws Exception
*/
@Test
public void testMaintenanceModeChanged()
- throws AmbariException {
+ throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -277,11 +280,11 @@ public class RecoveryConfigHelperTest {
/**
* Disable recovery on a component and verify that the config is stale.
*
- * @throws AmbariException
+ * @throws Exception
*/
@Test
public void testServiceComponentRecoveryChanged()
- throws AmbariException {
+ throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
Service hdfs = cluster.addService(HDFS);
hdfs.persist();
@@ -309,14 +312,14 @@ public class RecoveryConfigHelperTest {
}
/**
- * Test a cluster with two hosts. The first host gets the configuration during registration.
- * The second host gets it during it's first heartbeat.
+ * Test a cluster with two hosts. The first host gets the configuration during
+ * registration. The second host gets it during its first heartbeat.
*
- * @throws AmbariException
+ * @throws Exception
*/
@Test
public void testMultiNodeCluster()
- throws AmbariException {
+ throws Exception {
Set<String> hostNames = new HashSet<String>() {{
add("Host1");
add("Host2");
@@ -351,7 +354,7 @@ public class RecoveryConfigHelperTest {
}
private Cluster getDummyCluster(Set<String> hostNames)
- throws AmbariException {
+ throws Exception {
Map<String, String> configProperties = new HashMap<String, String>() {{
put(RecoveryConfigHelper.RECOVERY_ENABLED_KEY, "true");
put(RecoveryConfigHelper.RECOVERY_TYPE_KEY, "AUTO_START");
@@ -365,7 +368,7 @@ public class RecoveryConfigHelperTest {
}
private Cluster getDummyCluster(final String hostname)
- throws AmbariException {
+ throws Exception {
Set<String> hostNames = new HashSet<String>(){{
add(hostname);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
index 7e6a056..6ac607d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
@@ -18,10 +18,39 @@
package org.apache.ambari.server.controller;
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import junit.framework.Assert;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.getCurrentArguments;
+import static org.easymock.EasyMock.isNull;
+import static org.easymock.EasyMock.newCapture;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.net.InetAddress;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import javax.persistence.EntityManager;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.Role;
import org.apache.ambari.server.actionmanager.ActionManager;
@@ -73,7 +102,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.State;
import org.apache.ambari.server.state.cluster.ClusterFactory;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
import org.apache.ambari.server.state.host.HostFactory;
import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
import org.apache.ambari.server.state.kerberos.KerberosConfigurationDescriptor;
@@ -96,37 +124,11 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
-import javax.persistence.EntityManager;
-import java.net.InetAddress;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
-import static org.easymock.EasyMock.anyLong;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.getCurrentArguments;
-import static org.easymock.EasyMock.isNull;
-import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import junit.framework.Assert;
@SuppressWarnings("unchecked")
public class KerberosHelperTest extends EasyMockSupport {
@@ -203,7 +205,7 @@ public class KerberosHelperTest extends EasyMockSupport {
bind(RequestFactory.class).toInstance(createNiceMock(RequestFactory.class));
bind(StageFactory.class).toInstance(createNiceMock(StageFactory.class));
bind(RoleGraphFactory.class).to(RoleGraphFactoryImpl.class);
- bind(Clusters.class).toInstance(createNiceMock(ClustersImpl.class));
+ bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
bind(ConfigHelper.class).toInstance(createNiceMock(ConfigHelper.class));
bind(KerberosOperationHandlerFactory.class).toInstance(kerberosOperationHandlerFactory);
bind(ClusterController.class).toInstance(clusterController);
@@ -225,7 +227,7 @@ public class KerberosHelperTest extends EasyMockSupport {
StageUtils.setTopologyManager(topologyManager);
expect(topologyManager.getPendingHostComponents()).andReturn(
Collections.<String, Collection<String>>emptyMap()).anyTimes();
-
+
StageUtils.setConfiguration(configuration);
expect(configuration.getApiSSLAuthentication()).andReturn(false).anyTimes();
expect(configuration.getClientApiPort()).andReturn(8080).anyTimes();
@@ -2538,7 +2540,7 @@ public class KerberosHelperTest extends EasyMockSupport {
expect(createKeytabFilesServerAction.createKeytab(capture(capturePrincipalForKeytab), eq("password"), eq(1), anyObject(KerberosOperationHandler.class), eq(true), eq(true), isNull(ActionLog.class)))
.andReturn(new Keytab())
.times(3);
-
+
replayAll();
AmbariMetaInfo ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
index c7261ea..70f0332 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
@@ -255,7 +255,10 @@ public class ClusterResourceProviderTest {
@Test
public void testCreateResourcesWithRetry() throws Exception {
- RetryHelper.init(3);
+ Clusters clusters = createMock(Clusters.class);
+ EasyMock.replay(clusters);
+
+ RetryHelper.init(clusters, 3);
Resource.Type type = Resource.Type.Cluster;
AmbariManagementController managementController = createMock(AmbariManagementController.class);
@@ -309,7 +312,7 @@ public class ClusterResourceProviderTest {
// verify
verify(managementController, response);
- RetryHelper.init(0);
+ RetryHelper.init(clusters, 0);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
index de2d292..f605276 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
@@ -18,12 +18,11 @@
package org.apache.ambari.server.orm;
-import javax.persistence.EntityManager;
-import junit.framework.Assert;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
+import java.lang.reflect.Method;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
@@ -35,6 +34,8 @@ import java.util.Map;
import java.util.Set;
import java.util.UUID;
+import javax.persistence.EntityManager;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.Role;
import org.apache.ambari.server.RoleCommand;
@@ -88,6 +89,7 @@ import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.State;
import org.apache.ambari.server.state.alert.Scope;
import org.apache.ambari.server.state.alert.SourceType;
+import org.apache.ambari.server.state.cluster.ClustersImpl;
import org.springframework.security.crypto.password.PasswordEncoder;
import com.google.inject.Inject;
@@ -96,6 +98,8 @@ import com.google.inject.Provider;
import com.google.inject.Singleton;
import com.google.inject.persist.Transactional;
+import junit.framework.Assert;
+
@Singleton
public class OrmTestHelper {
@@ -308,7 +312,7 @@ public class OrmTestHelper {
* @return the cluster ID.
*/
@Transactional
- public Long createCluster() {
+ public Long createCluster() throws Exception {
return createCluster(CLUSTER_NAME);
}
@@ -318,7 +322,7 @@ public class OrmTestHelper {
* @return the cluster ID.
*/
@Transactional
- public Long createCluster(String clusterName) {
+ public Long createCluster(String clusterName) throws Exception {
// required to populate the database with stacks
injector.getInstance(AmbariMetaInfo.class);
@@ -354,6 +358,15 @@ public class OrmTestHelper {
clusterEntity = clusterDAO.findByName(clusterEntity.getClusterName());
assertNotNull(clusterEntity);
assertTrue(clusterEntity.getClusterId() > 0);
+
+ // because this test method goes around the Clusters business object, we
+ // forcefully will refresh the internal state so that any tests which
+ // incorrectly use Clusters after calling this won't be affected
+ Clusters clusters = injector.getInstance(Clusters.class);
+ Method method = ClustersImpl.class.getDeclaredMethod("loadClustersAndHosts");
+ method.setAccessible(true);
+ method.invoke(clusters);
+
return clusterEntity.getClusterId();
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
index 4edfdcb..2e0c232 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
@@ -73,7 +73,7 @@ public class ClusterVersionDAOTest {
* Helper function to transition the cluster through several cluster versions.
* @param currStep Step to go to is a value from 1 - 7, inclusive.
*/
- private void createRecordsUntilStep(int currStep) {
+ private void createRecordsUntilStep(int currStep) throws Exception {
// Fresh install on A
if (currStep >= 1 && lastStep <= 0) {
clusterId = helper.createCluster();
@@ -147,7 +147,7 @@ public class ClusterVersionDAOTest {
}
@Test
- public void testFindByStackAndVersion() {
+ public void testFindByStackAndVersion() throws Exception {
createRecordsUntilStep(1);
Assert.assertEquals(
0,
@@ -161,14 +161,14 @@ public class ClusterVersionDAOTest {
}
@Test
- public void testFindByCluster() {
+ public void testFindByCluster() throws Exception {
createRecordsUntilStep(1);
Assert.assertEquals(0, clusterVersionDAO.findByCluster("non existing").size());
Assert.assertEquals(1, clusterVersionDAO.findByCluster(cluster.getClusterName()).size());
}
@Test
- public void testFindByClusterAndStackAndVersion() {
+ public void testFindByClusterAndStackAndVersion() throws Exception {
createRecordsUntilStep(1);
Assert.assertNull(clusterVersionDAO.findByClusterAndStackAndVersion(
cluster.getClusterName(), BAD_STACK, "non existing"));
@@ -181,7 +181,7 @@ public class ClusterVersionDAOTest {
* At all times the cluster should have a cluster version whose state is {@link org.apache.ambari.server.state.RepositoryVersionState#CURRENT}
*/
@Test
- public void testFindByClusterAndStateCurrent() {
+ public void testFindByClusterAndStateCurrent() throws Exception {
createRecordsUntilStep(1);
Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
@@ -208,7 +208,7 @@ public class ClusterVersionDAOTest {
* Test the state of certain cluster versions.
*/
@Test
- public void testFindByClusterAndState() {
+ public void testFindByClusterAndState() throws Exception {
createRecordsUntilStep(1);
Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ConfigGroupDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ConfigGroupDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ConfigGroupDAOTest.java
index 33e2636..c73843f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ConfigGroupDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ConfigGroupDAOTest.java
@@ -22,9 +22,6 @@ import java.util.Arrays;
import java.util.List;
import java.util.Set;
-import com.google.inject.assistedinject.AssistedInject;
-import junit.framework.Assert;
-
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
@@ -36,10 +33,9 @@ import org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity;
import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
import org.apache.ambari.server.orm.entities.ConfigGroupHostMappingEntity;
import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.orm.entities.ResourceEntity;
-import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.security.authorization.ResourceType;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.host.HostFactory;
import org.junit.After;
import org.junit.Before;
@@ -49,6 +45,8 @@ import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.persist.PersistService;
+import junit.framework.Assert;
+
public class ConfigGroupDAOTest {
private Injector injector;
private ConfigGroupDAO configGroupDAO;
@@ -88,28 +86,13 @@ public class ConfigGroupDAOTest {
private ConfigGroupEntity createConfigGroup(String clusterName,
String groupName, String tag, String desc, List<HostEntity> hosts,
List<ClusterConfigEntity> configs) throws Exception {
- ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
-
- // create an admin resource to represent this cluster
- ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
- if (resourceTypeEntity == null) {
- resourceTypeEntity = new ResourceTypeEntity();
- resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
- resourceTypeEntity.setName(ResourceType.CLUSTER.name());
- resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
- }
-
+ Clusters clusters = injector.getInstance(Clusters.class);
StackEntity stackEntity = stackDAO.find("HDP", "0.1");
- ResourceEntity resourceEntity = new ResourceEntity();
- resourceEntity.setResourceType(resourceTypeEntity);
+ clusters.addCluster(clusterName, new StackId(stackEntity));
+ ClusterEntity clusterEntity = clusterDAO.findByName(clusterName);
- ClusterEntity clusterEntity = new ClusterEntity();
- clusterEntity.setClusterName(clusterName);
- clusterEntity.setResource(resourceEntity);
- clusterEntity.setDesiredStack(stackEntity);
-
- clusterDAO.create(clusterEntity);
+ ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
configGroupEntity.setClusterEntity(clusterEntity);
configGroupEntity.setClusterId(clusterEntity.getClusterId());
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAOTest.java
index 9d390a9..1267f96 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAOTest.java
@@ -18,6 +18,8 @@
package org.apache.ambari.server.orm.dao;
+import java.util.UUID;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
@@ -38,8 +40,6 @@ import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.persist.PersistService;
-import java.util.UUID;
-
/**
* RepositoryVersionDAO unit tests.
*/
@@ -174,7 +174,7 @@ public class RepositoryVersionDAOTest {
}
@Test
- public void testDeleteCascade() {
+ public void testDeleteCascade() throws Exception {
long clusterId = helper.createCluster();
ClusterEntity cluster = clusterDAO.findById(clusterId);
createSingleRecord();
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/SettingDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/SettingDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/SettingDAOTest.java
index 77f87be..4029bae 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/SettingDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/SettingDAOTest.java
@@ -17,8 +17,13 @@
*/
package org.apache.ambari.server.orm.dao;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertNull;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.OrmTestHelper;
@@ -26,19 +31,15 @@ import org.apache.ambari.server.orm.entities.SettingEntity;
import org.junit.Before;
import org.junit.Test;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Objects;
-
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertNull;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
public class SettingDAOTest {
private Injector injector;
private SettingDAO dao;
@Before
- public void setUp() {
+ public void setUp() throws Exception {
injector = Guice.createInjector(new InMemoryDefaultTestModule());
dao = injector.getInstance(SettingDAO.class);
injector.getInstance(GuiceJpaInitializer.class);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetDAOTest.java
index 080558a..8be805a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetDAOTest.java
@@ -18,6 +18,9 @@
package org.apache.ambari.server.orm.dao;
+import java.util.LinkedList;
+import java.util.List;
+
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.OrmTestHelper;
@@ -33,9 +36,6 @@ import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.persist.PersistService;
-import java.util.LinkedList;
-import java.util.List;
-
/**
* WidgetDAO unit tests.
*/
@@ -49,7 +49,7 @@ public class WidgetDAOTest {
@Before
- public void before() {
+ public void before() throws Exception {
injector = Guice.createInjector(new InMemoryDefaultTestModule());
widgetDAO = injector.getInstance(WidgetDAO.class);
widgetLayoutDAO = injector.getInstance(WidgetLayoutDAO.class);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetLayoutDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetLayoutDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetLayoutDAOTest.java
index 5b9ea6a..f50ae44 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetLayoutDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetLayoutDAOTest.java
@@ -18,9 +18,9 @@
package org.apache.ambari.server.orm.dao;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
+import java.util.LinkedList;
+import java.util.List;
+
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.OrmTestHelper;
@@ -32,8 +32,9 @@ import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
-import java.util.LinkedList;
-import java.util.List;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
/**
* WidgetLayoutDAO unit tests.
@@ -48,7 +49,7 @@ public class WidgetLayoutDAOTest {
@Before
- public void before() {
+ public void before() throws Exception {
injector = Guice.createInjector(new InMemoryDefaultTestModule());
widgetLayoutDAO = injector.getInstance(WidgetLayoutDAO.class);
widgetDAO = injector.getInstance(WidgetDAO.class);
http://git-wip-us.apache.org/repos/asf/ambari/blob/d53c9e2e/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
index 02ce9fb..d8b6a83 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
@@ -21,16 +21,11 @@ import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
-import com.google.inject.persist.jpa.AmbariJpaPersistModule;
-import junit.framework.Assert;
-
import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.dao.ConfigGroupDAO;
import org.apache.ambari.server.orm.dao.ConfigGroupHostMappingDAO;
-import org.apache.ambari.server.orm.dao.HostDAO;
import org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity;
import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
import org.apache.ambari.server.orm.entities.ConfigGroupHostMappingEntity;
@@ -45,6 +40,8 @@ import com.google.inject.Injector;
import com.google.inject.persist.PersistService;
import com.google.inject.persist.Transactional;
+import junit.framework.Assert;
+
public class ConfigGroupTest {
private Clusters clusters;
@@ -109,6 +106,7 @@ public class ConfigGroupTest {
"HDFS", "New HDFS configs for h1", configs, hosts);
configGroup.persist();
+ cluster.addConfigGroup(configGroup);
return configGroup;
}
[15/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fbfcf98a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fbfcf98a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fbfcf98a
Branch: refs/heads/trunk
Commit: fbfcf98abd5b8ced11b9b5ef35f73a9d319d0c5e
Parents: 197a37f 8813b1f
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Oct 7 15:58:00 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Oct 7 15:58:00 2016 -0400
----------------------------------------------------------------------
.../server/state/DependencyConditionInfo.java | 102 -------------------
.../ambari/server/state/DependencyInfo.java | 36 +------
.../server/topology/BlueprintValidatorImpl.java | 13 ---
.../common-services/HDFS/2.1.0.2.0/metainfo.xml | 44 --------
.../topology/BlueprintValidatorImplTest.java | 75 +-------------
.../ambari/view/hive2/ConnectionFactory.java | 37 ++++---
.../ambari/view/hive2/PropertyValidator.java | 8 ++
.../savedQueries/SavedQueryService.java | 47 ++++++++-
.../ui/hive-web/app/controllers/queries.js | 24 ++++-
.../ui/hive-web/app/initializers/i18n.js | 5 +
.../views/hive-next/src/main/resources/view.xml | 9 ++
.../savedQueries/SavedQueryService.java | 48 +++++++--
.../ui/hive-web/app/controllers/queries.js | 22 +++-
.../ui/hive-web/app/initializers/i18n.js | 5 +-
.../savedQueries/SavedQueryServiceTest.java | 16 ++-
15 files changed, 193 insertions(+), 298 deletions(-)
----------------------------------------------------------------------
[19/32] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-18456
Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ed2018bf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ed2018bf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ed2018bf
Branch: refs/heads/trunk
Commit: ed2018bf06ec5ee18eb3c0c618f664db0d058025
Parents: 5cf5c83 6587fda
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Oct 10 20:37:39 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Mon Oct 10 20:37:39 2016 -0400
----------------------------------------------------------------------
ambari-server/conf/unix/ambari.properties | 6 +
ambari-server/conf/unix/install-helper.sh | 1 +
ambari-server/conf/windows/ambari.properties | 6 +
ambari-server/sbin/ambari-server | 4 +-
.../server/configuration/Configuration.java | 135 +++++++++++++++++++
.../AmbariManagementControllerImpl.java | 2 +-
.../server/controller/KerberosHelperImpl.java | 6 +-
.../controller/ServiceComponentHostRequest.java | 4 +-
.../BlueprintConfigurationProcessor.java | 113 ++++++++++++----
.../internal/HostComponentResourceProvider.java | 44 +++---
.../security/AbstractSecurityHeaderFilter.java | 43 ++++++
.../AmbariServerSecurityHeaderFilter.java | 3 +
.../AmbariViewsSecurityHeaderFilter.java | 3 +
.../server/serveraction/ServerAction.java | 6 +
.../BlueprintConfigurationProcessorTest.java | 82 +++++++++++
.../AbstractSecurityHeaderFilterTest.java | 38 +++++-
.../AmbariServerSecurityHeaderFilterTest.java | 7 +
.../AmbariViewsSecurityHeaderFilterTest.java | 6 +
.../main/admin/stack_and_upgrade_controller.js | 56 ++++----
ambari-web/app/messages.js | 1 +
ambari-web/app/routes/add_service_routes.js | 24 ++++
.../configs/service_configs_by_category_view.js | 12 +-
.../admin/stack_and_upgrade_controller_test.js | 45 +++++--
.../8.0.5/package/scripts/microsoft_r.py | 11 +-
.../MICROSOFT_R/8.0.5/metainfo.xml | 19 +++
.../MICROSOFT_R/8.0.5/repos/repoinfo.xml | 7 +
26 files changed, 583 insertions(+), 101 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/ed2018bf/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------