You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by sw...@apache.org on 2016/12/08 23:30:02 UTC
[23/25] ambari git commit: Merge from branch-2.5
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
index d187947..e8c4b5f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
@@ -1492,7 +1492,7 @@ public class AlertsDAO implements Cleanable {
* @return a long representing the number of affected (deleted) records
*/
@Transactional
- private int cleanAlertNoticesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
+ int cleanAlertNoticesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
LOG.info("Deleting AlertNotice entities before date " + new Date(beforeDateMillis));
EntityManager entityManager = m_entityManagerProvider.get();
List<Integer> ids = findAllAlertHistoryIdsBeforeDate(clusterId, beforeDateMillis);
@@ -1523,7 +1523,7 @@ public class AlertsDAO implements Cleanable {
* @return a long representing the number of affected (deleted) records
*/
@Transactional
- private int cleanAlertCurrentsForClusterBeforeDate(long clusterId, long beforeDateMillis) {
+ int cleanAlertCurrentsForClusterBeforeDate(long clusterId, long beforeDateMillis) {
LOG.info("Deleting AlertCurrent entities before date " + new Date(beforeDateMillis));
EntityManager entityManager = m_entityManagerProvider.get();
List<Integer> ids = findAllAlertHistoryIdsBeforeDate(clusterId, beforeDateMillis);
@@ -1553,7 +1553,7 @@ public class AlertsDAO implements Cleanable {
*/
@Transactional
- private int cleanAlertHistoriesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
+ int cleanAlertHistoriesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
return executeQuery("AlertHistoryEntity.removeInClusterBeforeDate", AlertHistoryEntity.class, clusterId, beforeDateMillis);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java
index 04c6030..5748dc9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigMappingEntity.java
@@ -27,15 +27,17 @@ import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.Table;
+import com.google.common.base.Objects;
+
/**
* Entity that maps to a cluster config mapping.
*/
-@Table(name = "clusterconfigmapping")
@Entity
+@Table(name = "clusterconfigmapping")
@IdClass(ClusterConfigMappingEntityPK.class)
-@NamedQueries({
- @NamedQuery(name = "ClusterConfigMappingEntity.findLatestClusterConfigMappingsByType",
- query = "SELECT mapping FROM ClusterConfigMappingEntity mapping WHERE mapping.clusterId = :clusterId AND mapping.selectedInd > 0 AND mapping.typeName = :typeName")})
+@NamedQueries({ @NamedQuery(
+ name = "ClusterConfigMappingEntity.findLatestClusterConfigMappingsByType",
+ query = "SELECT mapping FROM ClusterConfigMappingEntity mapping WHERE mapping.clusterId = :clusterId AND mapping.selectedInd > 0 AND mapping.typeName = :typeName") })
public class ClusterConfigMappingEntity {
@@ -192,4 +194,14 @@ public class ClusterConfigMappingEntity {
return true;
}
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public String toString() {
+ return Objects.toStringHelper(this).add("clusterId", clusterId).add("type", typeName).add("tag",
+ tag).add("selected", selectedInd).add("created", createTimestamp).toString();
+ }
+
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java
index 58b2e5d..dc71b61 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java
@@ -101,7 +101,7 @@ public class GroupEntity {
return ldapGroup == 0 ? Boolean.FALSE : Boolean.TRUE;
}
- public void setLdapGroup(Boolean ldapGroup) {
+ private void setLdapGroup(Boolean ldapGroup) {
if (ldapGroup == null) {
this.ldapGroup = null;
} else {
@@ -113,8 +113,9 @@ public class GroupEntity {
return groupType;
}
- public void setgroupType(GroupType groupType) {
+ public void setGroupType(GroupType groupType) {
this.groupType = groupType;
+ setLdapGroup(groupType == GroupType.LDAP);
}
public Set<MemberEntity> getMemberEntities() {
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
index 2f7bdd0..4b3237b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
@@ -49,10 +49,8 @@ import org.apache.ambari.server.orm.entities.PermissionEntity;
import org.apache.ambari.server.orm.entities.PrincipalEntity;
import org.apache.ambari.server.orm.entities.PrincipalTypeEntity;
import org.apache.ambari.server.orm.entities.PrivilegeEntity;
-import org.apache.ambari.server.orm.entities.ResourceEntity;
import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
import org.apache.ambari.server.orm.entities.UserEntity;
-import org.apache.ambari.server.security.ClientSecurityType;
import org.apache.ambari.server.security.ldap.LdapBatchDto;
import org.apache.ambari.server.security.ldap.LdapUserGroupMemberDto;
import org.apache.commons.lang.StringUtils;
@@ -277,7 +275,7 @@ public class Users {
public synchronized void setGroupLdap(String groupName) throws AmbariException {
GroupEntity groupEntity = groupDAO.findGroupByName(groupName);
if (groupEntity != null) {
- groupEntity.setLdapGroup(true);
+ groupEntity.setGroupType(GroupType.LDAP);
groupDAO.merge(groupEntity);
} else {
throw new AmbariException("Group " + groupName + " doesn't exist");
@@ -435,7 +433,7 @@ public class Users {
final GroupEntity groupEntity = new GroupEntity();
groupEntity.setGroupName(groupName);
groupEntity.setPrincipal(principalEntity);
- groupEntity.setgroupType(groupType);
+ groupEntity.setGroupType(groupType);
groupDAO.create(groupEntity);
}
@@ -701,7 +699,7 @@ public class Users {
final Set<GroupEntity> groupsToBecomeLdap = new HashSet<GroupEntity>();
for (String groupName : batchInfo.getGroupsToBecomeLdap()) {
final GroupEntity groupEntity = groupDAO.findGroupByName(groupName);
- groupEntity.setLdapGroup(true);
+ groupEntity.setGroupType(GroupType.LDAP);
allGroups.put(groupEntity.getGroupName(), groupEntity);
groupsToBecomeLdap.add(groupEntity);
}
@@ -737,7 +735,7 @@ public class Users {
final GroupEntity groupEntity = new GroupEntity();
groupEntity.setGroupName(groupName);
groupEntity.setPrincipal(principalEntity);
- groupEntity.setLdapGroup(true);
+ groupEntity.setGroupType(GroupType.LDAP);
allGroups.put(groupEntity.getGroupName(), groupEntity);
groupsToCreate.add(groupEntity);
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
index 5459ddb..97280ee 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
@@ -451,7 +451,7 @@ public class ConfigureAction extends AbstractServerAction {
// of creating a whole new history record since it was already done
if (!targetStack.equals(currentStack) && targetStack.equals(configStack)) {
config.setProperties(newValues);
- config.persist(false);
+ config.save();
return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", outputBuffer.toString(), "");
}
@@ -570,8 +570,9 @@ public class ConfigureAction extends AbstractServerAction {
for(Replace replacement: replacements){
if(isOperationAllowed(cluster, configType, replacement.key,
- replacement.ifKey, replacement.ifType, replacement.ifValue, replacement.ifKeyState))
+ replacement.ifKey, replacement.ifType, replacement.ifValue, replacement.ifKeyState)) {
allowedReplacements.add(replacement);
+ }
}
return allowedReplacements;
@@ -582,8 +583,9 @@ public class ConfigureAction extends AbstractServerAction {
for(ConfigurationKeyValue configurationKeyValue: sets){
if(isOperationAllowed(cluster, configType, configurationKeyValue.key,
- configurationKeyValue.ifKey, configurationKeyValue.ifType, configurationKeyValue.ifValue, configurationKeyValue.ifKeyState))
+ configurationKeyValue.ifKey, configurationKeyValue.ifType, configurationKeyValue.ifValue, configurationKeyValue.ifKeyState)) {
allowedSets.add(configurationKeyValue);
+ }
}
return allowedSets;
@@ -593,14 +595,16 @@ public class ConfigureAction extends AbstractServerAction {
List<Transfer> allowedTransfers = new ArrayList<>();
for (Transfer transfer : transfers) {
String key = "";
- if(transfer.operation == TransferOperation.DELETE)
+ if(transfer.operation == TransferOperation.DELETE) {
key = transfer.deleteKey;
- else
+ } else {
key = transfer.fromKey;
+ }
if(isOperationAllowed(cluster, configType, key,
- transfer.ifKey, transfer.ifType, transfer.ifValue, transfer.ifKeyState))
+ transfer.ifKey, transfer.ifType, transfer.ifValue, transfer.ifKeyState)) {
allowedTransfers.add(transfer);
+ }
}
return allowedTransfers;
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
index ffa21ab..4833729 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
@@ -18,7 +18,11 @@
package org.apache.ambari.server.serveraction.upgrades;
-import com.google.inject.Inject;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.agent.CommandReport;
@@ -28,13 +32,7 @@ import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.commons.lang.StringUtils;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
/**
* During stack upgrade, update lzo codec path in mapreduce.application.classpath and
@@ -78,7 +76,7 @@ public class FixLzoCodecPath extends AbstractServerAction {
}
}
config.setProperties(properties);
- config.persist(false);
+ config.save();
}
if (modifiedProperties.isEmpty()) {
return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
index 3a06476..75588d5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
@@ -18,7 +18,9 @@
package org.apache.ambari.server.serveraction.upgrades;
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.agent.CommandReport;
@@ -28,8 +30,7 @@ import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.commons.lang.StringUtils;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
+import com.google.inject.Inject;
/**
* During stack upgrade, update lzo codec path in mapreduce.application.classpath and
@@ -86,7 +87,7 @@ public class FixOozieAdminUsers extends AbstractServerAction {
oozieProperties.put(OOZIE_ADMIN_USERS_PROP, newOozieAdminUsers);
oozieConfig.setProperties(oozieProperties);
- oozieConfig.persist(false);
+ oozieConfig.save();
return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
String.format("Set oozie admin users to %s", newOozieAdminUsers), "");
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
index 7f6d4b1..739dd7e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
@@ -18,7 +18,10 @@
package org.apache.ambari.server.serveraction.upgrades;
-import com.google.inject.Inject;
+import java.math.BigDecimal;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.agent.CommandReport;
@@ -27,9 +30,7 @@ import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
-import java.math.BigDecimal;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
+import com.google.inject.Inject;
/**
* Computes HBase properties. This class is only used when moving from
@@ -79,8 +80,9 @@ public class HBaseConfigCalculation extends AbstractServerAction {
"Upper or lower memstore limit setting value is malformed, skipping", "");
}
- if (lowerLimit.scale() < 2) //make sure result will have at least 2 digits after decimal point
+ if (lowerLimit.scale() < 2) {
lowerLimit = lowerLimit.setScale(2, BigDecimal.ROUND_HALF_UP);
+ }
BigDecimal lowerLimitNew = lowerLimit.divide(upperLimit, BigDecimal.ROUND_HALF_UP);
properties.put(NEW_LOWER_LIMIT_PROPERTY_NAME, lowerLimitNew.toString());
@@ -90,7 +92,7 @@ public class HBaseConfigCalculation extends AbstractServerAction {
properties.remove(OLD_LOWER_LIMIT_PROPERTY_NAME);
config.setProperties(properties);
- config.persist(false);
+ config.save();
return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
String.format("%s was set to %s", NEW_LOWER_LIMIT_PROPERTY_NAME, lowerLimitNew.toString()), "");
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
index b238bca..fb15555 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
@@ -18,7 +18,11 @@
package org.apache.ambari.server.serveraction.upgrades;
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.agent.CommandReport;
@@ -27,10 +31,7 @@ import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
/**
* Computes HBase Env content property.
@@ -79,7 +80,7 @@ public class HBaseEnvMaxDirectMemorySizeAction extends AbstractServerAction {
properties.put(CONTENT_NAME, appendedContent);
config.setProperties(properties);
- config.persist(false);
+ config.save();
return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
String.format("The %s/%s property was appended with %s", SOURCE_CONFIG_TYPE, CONTENT_NAME, APPEND_CONTENT_LINE),"");
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
index 0e10160..c5000bf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
@@ -18,7 +18,11 @@
package org.apache.ambari.server.serveraction.upgrades;
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.agent.CommandReport;
@@ -27,10 +31,7 @@ import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
/**
* Append hive-env config type with HIVE_HOME and HIVE_CONF_DIR variables if they are absent
@@ -103,7 +104,7 @@ public class HiveEnvClasspathAction extends AbstractServerAction {
}
config.setProperties(properties);
- config.persist(false);
+ config.save();
return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
String.format("Added %s, %s to content at %s", HIVE_CONF_DIR, HIVE_HOME, TARGET_CONFIG_TYPE), "");
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
index 0ade30b..7ebad08 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
@@ -85,7 +85,7 @@ public class HiveZKQuorumConfigAction extends AbstractServerAction {
hiveSiteProperties.put(HIVE_SITE_ZK_CONNECT_STRING, zookeeperQuorum);
hiveSite.setProperties(hiveSiteProperties);
- hiveSite.persist(false);
+ hiveSite.save();
return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
String.format("Successfully set %s and %s in %s", HIVE_SITE_ZK_QUORUM,
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
index 4da67ca..9b8a7dc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
@@ -18,7 +18,11 @@
package org.apache.ambari.server.serveraction.upgrades;
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.agent.CommandReport;
@@ -27,10 +31,7 @@ import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
/**
* Changes oozie-env during upgrade (adds -Dhdp.version to $HADOOP_OPTS variable)
@@ -67,7 +68,7 @@ public class OozieConfigCalculation extends AbstractServerAction {
}
config.setProperties(properties);
- config.persist(false);
+ config.save();
return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
String.format("Added -Dhdp.version to $HADOOP_OPTS variable at %s", TARGET_CONFIG_TYPE), "");
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
index ff4a20e..8e0161b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
@@ -141,13 +141,13 @@ public class RangerConfigCalculation extends AbstractServerAction {
targetValues.put("ranger.jpa.audit.jdbc.dialect", dialect);
config.setProperties(targetValues);
- config.persist(false);
+ config.save();
config = cluster.getDesiredConfigByType(RANGER_ENV_CONFIG_TYPE);
targetValues = config.getProperties();
targetValues.put("ranger_privelege_user_jdbc_url", userJDBCUrl);
config.setProperties(targetValues);
- config.persist(false);
+ config.save();
return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", stdout.toString(), "");
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
index ba0da79..c059c9e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
@@ -87,7 +87,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
if (null != hadoopUser) {
targetValues.put(RANGER_PLUGINS_HDFS_SERVICE_USER, hadoopUser);
rangerAdminconfig.setProperties(targetValues);
- rangerAdminconfig.persist(false);
+ rangerAdminconfig.save();
sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HDFS_SERVICE_USER);
} else {
errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hdfs_user", HADOOP_ENV_CONFIG_TYPE);
@@ -104,7 +104,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
if (null != hiveUser) {
targetValues.put(RANGER_PLUGINS_HIVE_SERVICE_USER, hiveUser);
rangerAdminconfig.setProperties(targetValues);
- rangerAdminconfig.persist(false);
+ rangerAdminconfig.save();
sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HIVE_SERVICE_USER);
} else {
errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hive_user", HIVE_ENV_CONFIG_TYPE);
@@ -121,7 +121,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
if (null != yarnUser) {
targetValues.put(RANGER_PLUGINS_YARN_SERVICE_USER, yarnUser);
rangerAdminconfig.setProperties(targetValues);
- rangerAdminconfig.persist(false);
+ rangerAdminconfig.save();
sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_YARN_SERVICE_USER);
} else {
errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "yarn_user", YARN_ENV_CONFIG_TYPE);
@@ -138,7 +138,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
if (null != hbaseUser) {
targetValues.put(RANGER_PLUGINS_HBASE_SERVICE_USER, hbaseUser);
rangerAdminconfig.setProperties(targetValues);
- rangerAdminconfig.persist(false);
+ rangerAdminconfig.save();
sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HBASE_SERVICE_USER);
} else {
errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hbase_user", HBASE_ENV_CONFIG_TYPE);
@@ -155,7 +155,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
if (null != knoxUser) {
targetValues.put(RANGER_PLUGINS_KNOX_SERVICE_USER, knoxUser);
rangerAdminconfig.setProperties(targetValues);
- rangerAdminconfig.persist(false);
+ rangerAdminconfig.save();
sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KNOX_SERVICE_USER);
} else {
errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "knox_user", KNOX_ENV_CONFIG_TYPE);
@@ -190,7 +190,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
}
targetValues.put(RANGER_PLUGINS_STORM_SERVICE_USER, stormValue);
rangerAdminconfig.setProperties(targetValues);
- rangerAdminconfig.persist(false);
+ rangerAdminconfig.save();
sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_STORM_SERVICE_USER);
} else {
errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "storm_user", STORM_ENV_CONFIG_TYPE);
@@ -207,7 +207,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
if (null != kafkaUser) {
targetValues.put(RANGER_PLUGINS_KAFKA_SERVICE_USER, kafkaUser);
rangerAdminconfig.setProperties(targetValues);
- rangerAdminconfig.persist(false);
+ rangerAdminconfig.save();
sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KAFKA_SERVICE_USER);
} else {
errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "kafka_user", KAFKA_ENV_CONFIG_TYPE);
@@ -224,7 +224,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
if (null != rangerKmsUser) {
targetValues.put(RANGER_PLUGINS_KMS_SERVICE_USER, rangerKmsUser);
rangerAdminconfig.setProperties(targetValues);
- rangerAdminconfig.persist(false);
+ rangerAdminconfig.save();
sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KMS_SERVICE_USER);
} else {
errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "kms_user", RANGER_KMS_ENV_CONFIG_TYPE);
@@ -243,10 +243,10 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
if (null != spnegoKeytab) {
targetValues.put(RANGER_SPNEGO_KEYTAB, spnegoKeytab);
rangerAdminconfig.setProperties(targetValues);
- rangerAdminconfig.persist(false);
+ rangerAdminconfig.save();
sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_SPNEGO_KEYTAB);
} else {
- errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "dfs.web.authentication.kerberos.keytab", HDFS_SITE_CONFIG_TYPE);
+ errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "dfs.web.authentication.kerberos.keytab", HDFS_SITE_CONFIG_TYPE);
}
} else {
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
index bb88f55..25387cc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
@@ -29,7 +29,6 @@ import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.SecurityType;
-import org.apache.commons.lang.StringUtils;
import com.google.inject.Inject;
@@ -83,7 +82,7 @@ public class RangerKmsProxyConfig extends AbstractServerAction {
targetValues.put(groupProp, "*");
targetValues.put(hostProp, "*");
kmsSite.setProperties(targetValues);
- kmsSite.persist(false);
+ kmsSite.save();
outputMsg = outputMsg + MessageFormat.format("Successfully added properties to {0}", RANGER_KMS_SITE_CONFIG_TYPE);
} else {
outputMsg = outputMsg + MessageFormat.format("Kerberos not enable, not setting proxy properties to {0}", RANGER_KMS_SITE_CONFIG_TYPE);
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
index 299a373..b1aa6e1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
@@ -25,7 +25,6 @@ import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ServiceNotFoundException;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.agent.CommandReport;
import org.apache.ambari.server.serveraction.AbstractServerAction;
@@ -89,7 +88,7 @@ public class SparkShufflePropertyConfig extends AbstractServerAction {
yarnSiteProperties.put(YARN_NODEMANAGER_AUX_SERVICES, newAuxServices);
yarnSiteProperties.put(YARN_NODEMANAGER_AUX_SERVICES_SPARK_SHUFFLE_CLASS, YARN_NODEMANAGER_AUX_SERVICES_SPARK_SHUFFLE_CLASS_VALUE);
yarnSiteConfig.setProperties(yarnSiteProperties);
- yarnSiteConfig.persist(false);
+ yarnSiteConfig.save();
return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
String.format("%s was set from %s to %s. %s was set to %s",
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
index feefcaf..d638858 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
@@ -67,7 +67,7 @@ public class YarnConfigCalculation extends AbstractServerAction {
yarnSiteProperties.put(YARN_RM_ZK_ADDRESS_PROPERTY_NAME, zkServersStr);
yarnSiteProperties.put(HADOOP_REGISTRY_ZK_QUORUM_PROPERTY_NAME, zkServersStr);
yarnSiteConfig.setProperties(yarnSiteProperties);
- yarnSiteConfig.persist(false);
+ yarnSiteConfig.save();
return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
String.format("%s was set from %s to %s. %s was set from %s to %s",
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 37f4167..52d39e0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -44,16 +44,19 @@ import org.apache.ambari.server.state.stack.ServiceMetainfoXml;
import org.apache.ambari.server.state.stack.StackMetainfoXml;
import org.apache.ambari.server.state.stack.UpgradePack;
import org.apache.ambari.server.state.stack.upgrade.Grouping;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
-import com.google.common.collect.ListMultimap;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimaps;
+
/**
* Stack module which provides all functionality related to parsing and fully
* resolving stacks from the stack definition.
@@ -831,6 +834,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
if (configPack == null) {
return;
}
+
for (ServiceModule module : serviceModules.values()) {
File upgradesFolder = module.getModuleInfo().getServiceUpgradesFolder();
if (upgradesFolder != null) {
@@ -866,12 +870,17 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
private UpgradePack getServiceUpgradePack(UpgradePack pack, File upgradesFolder) throws AmbariException {
File stackFolder = new File(upgradesFolder, stackInfo.getName());
File versionFolder = new File(stackFolder, stackInfo.getVersion());
+ // !!! relies on the service upgrade pack filename being named the exact same
File servicePackFile = new File(versionFolder, pack.getName() + ".xml");
+
LOG.info("Service folder: " + servicePackFile.getAbsolutePath());
- if (!servicePackFile.exists()) {
- return null;
+ if (servicePackFile.exists()) {
+ return parseServiceUpgradePack(pack, servicePackFile);
+ } else {
+ UpgradePack child = findServiceUpgradePack(pack, stackFolder);
+
+ return null == child ? null : parseServiceUpgradePack(pack, child);
}
- return parseServiceUpgradePack(pack, servicePackFile);
}
/**
@@ -879,6 +888,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
* for any service which specifies its own upgrade.
*/
private void mergeUpgradePack(UpgradePack pack, List<UpgradePack> servicePacks) throws AmbariException {
+
List<Grouping> originalGroups = pack.getAllGroups();
Map<String, List<Grouping>> allGroupMap = new HashMap<>();
for (Grouping group : originalGroups) {
@@ -886,8 +896,21 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
list.add(group);
allGroupMap.put(group.name, list);
}
+
for (UpgradePack servicePack : servicePacks) {
for (Grouping group : servicePack.getAllGroups()) {
+
+ /*
+ !!! special case where the service pack is targeted for any version. When
+ a service UP is targeted to run after another group, check to make sure that
+ the base UP contains the group.
+ */
+ if (servicePack.isAllTarget() && !allGroupMap.keySet().contains(group.addAfterGroup)) {
+ LOG.warn("Service Upgrade Pack specified after-group of {}, but that is not found in {}",
+ group.addAfterGroup, StringUtils.join(allGroupMap.keySet(), ','));
+ continue;
+ }
+
if (allGroupMap.containsKey(group.name)) {
List<Grouping> list = allGroupMap.get(group.name);
Grouping first = list.get(0);
@@ -903,8 +926,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
else {
list.add(group);
}
- }
- else {
+ } else {
List<Grouping> list = new ArrayList<>();
list.add(group);
allGroupMap.put(group.name, list);
@@ -931,15 +953,17 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
*/
private void orderGroups(List<Grouping> groups, Map<String, Grouping> mergedGroupMap) throws AmbariException {
Map<String, List<Grouping>> skippedGroups = new HashMap<>();
+
for (Map.Entry<String, Grouping> entry : mergedGroupMap.entrySet()) {
- String key = entry.getKey();
Grouping group = entry.getValue();
+
if (!groups.contains(group)) {
boolean added = addGrouping(groups, group);
if (added) {
addSkippedGroup(groups, skippedGroups, group);
} else {
List<Grouping> tmp = null;
+
// store the group until later
if (skippedGroups.containsKey(group.addAfterGroup)) {
tmp = skippedGroups.get(group.addAfterGroup);
@@ -951,6 +975,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
}
}
}
+
if (!skippedGroups.isEmpty()) {
throw new AmbariException("Missing groups: " + skippedGroups.keySet());
}
@@ -996,6 +1021,50 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
}
/**
+ * Finds an upgrade pack that:
+ * <ul>
+ * <li>Is found in the $SERVICENAME/upgrades/$STACKNAME folder</li>
+ * <li>Matches the same {@link UpgradePack#getType()} as the {@code base} upgrade pack</li>
+ * <li>Has the {@link UpgradePack#getTarget()} value equal to "*"</li>
+ * <li>Has the {@link UpgradePack#getTargetStack()} value equal to "*"</li>
+ * </ul>
+ * This method will not attempt to resolve the "most correct" upgrade pack. For this
+ * feature to work, there should be only one upgrade pack per type. If more specificity
+ * is required, then follow the convention of $SERVICENAME/upgrades/$STACKNAME/$STACKVERSION/$BASE_FILE_NAME.xml
+ *
+ * @param base the base upgrade pack for a stack
+ * @param upgradeStackDirectory service directory that contains stack upgrade files.
+ * @return an upgrade pack that matches {@code base}
+ */
+ private UpgradePack findServiceUpgradePack(UpgradePack base, File upgradeStackDirectory) {
+ if (!upgradeStackDirectory.exists() || !upgradeStackDirectory.isDirectory()) {
+ return null;
+ }
+
+ File[] upgradeFiles = upgradeStackDirectory.listFiles(StackDirectory.XML_FILENAME_FILTER);
+ if (0 == upgradeFiles.length) {
+ return null;
+ }
+
+ for (File f : upgradeFiles) {
+ try {
+ UpgradePack upgradePack = unmarshaller.unmarshal(UpgradePack.class, f);
+
+ // !!! if the type is the same and the target is "*", then it's good to merge
+ if (upgradePack.isAllTarget() && upgradePack.getType() == base.getType()) {
+ return upgradePack;
+ }
+
+ } catch (Exception e) {
+ LOG.warn("File {} does not appear to be an upgrade pack and will be skipped ({})",
+ f.getAbsolutePath(), e.getMessage());
+ }
+ }
+
+ return null;
+ }
+
+ /**
+ * Parses the service-specific upgrade file and merges the non-ordered elements
+ * (prerequisite check and processing sections).
*/
@@ -1008,12 +1077,24 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
throw new AmbariException("Unable to parse service upgrade file at location: " + serviceFile.getAbsolutePath(), e);
}
- parent.mergePrerequisiteChecks(pack);
- parent.mergeProcessing(pack);
+ return parseServiceUpgradePack(parent, pack);
+ }
- return pack;
+ /**
+ * Places prerequisite checks and processing objects onto the parent upgrade pack.
+ *
+ * @param parent the parent upgrade pack
+ * @param child the parsed child upgrade pack
+ * @return the child upgrade pack
+ */
+ private UpgradePack parseServiceUpgradePack(UpgradePack parent, UpgradePack child) {
+ parent.mergePrerequisiteChecks(child);
+ parent.mergeProcessing(child);
+
+ return child;
}
+
/**
* Process repositories associated with the stack.
* @throws AmbariException if unable to fully process the stack repositories
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
index 1e494b4..bd9b798 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
@@ -133,6 +133,9 @@ public class ComponentInfo {
private String timelineAppid;
+ @XmlElement(name="customFolder")
+ private String customFolder;
+
public ComponentInfo() {
}
@@ -158,6 +161,7 @@ public class ComponentInfo {
clientConfigFiles = prototype.clientConfigFiles;
timelineAppid = prototype.timelineAppid;
reassignAllowed = prototype.reassignAllowed;
+ customFolder = prototype.customFolder;
}
public String getName() {
@@ -396,6 +400,14 @@ public class ComponentInfo {
this.reassignAllowed = reassignAllowed;
}
+ public String getCustomFolder() {
+ return customFolder;
+ }
+
+ public void setCustomFolder(String customFolder) {
+ this.customFolder = customFolder;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
@@ -426,6 +438,7 @@ public class ComponentInfo {
if (name != null ? !name.equals(that.name) : that.name != null) return false;
if (clientConfigFiles != null ? !clientConfigFiles.equals(that.clientConfigFiles) :
that.clientConfigFiles != null) return false;
+ if (customFolder != null ? !customFolder.equals(that.customFolder) : that.customFolder != null) return false;
return true;
}
@@ -450,6 +463,7 @@ public class ComponentInfo {
result = 31 * result + (clientConfigFiles != null ? clientConfigFiles.hashCode() : 0);
// NULL = 0, TRUE = 2, FALSE = 1
result = 31 * result + (versionAdvertisedField != null ? (versionAdvertisedField.booleanValue() ? 2 : 1) : 0);
+ result = 31 * result + (customFolder != null ? customFolder.hashCode() : 0);
return result;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
index b35aad9..67570f4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
@@ -30,8 +30,6 @@ public interface Config {
void setPropertiesTypes(Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes);
- void setStackId(StackId stackId);
-
/**
* @return Config Type
*/
@@ -66,18 +64,6 @@ public interface Config {
public Map<String, Map<String, String>> getPropertiesAttributes();
/**
- * Change the version tag
- * @param versionTag
- */
- public void setTag(String versionTag);
-
- /**
- * Set config version
- * @param version
- */
- public void setVersion(Long version);
-
- /**
* Replace properties with new provided set
* @param properties Property Map to replace existing one
*/
@@ -110,11 +96,5 @@ public interface Config {
/**
* Persist the configuration.
*/
- public void persist();
-
- /**
- * Persist the configuration, optionally creating a new config entity.
- */
- public void persist(boolean newConfig);
-
+ public void save();
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
index eaf68aa..d6cd997 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
@@ -27,18 +27,20 @@ import com.google.inject.assistedinject.Assisted;
* Factory for creating configuration objects using {@link Assisted} constructor parameters
*/
public interface ConfigFactory {
-
+
/**
* Creates a new {@link Config} object using provided values.
*
* @param cluster
* @param type
+ * @param tag
* @param map
* @param mapAttributes
* @return
*/
- Config createNew(Cluster cluster, String type, Map<String, String> map, Map<String, Map<String, String>> mapAttributes);
-
+ Config createNew(Cluster cluster, @Assisted("type") String type, @Assisted("tag") String tag,
+ Map<String, String> map, Map<String, Map<String, String>> mapAttributes);
+
/**
* Creates a new {@link Config} object using provided entity
*
@@ -48,4 +50,16 @@ public interface ConfigFactory {
*/
Config createExisting(Cluster cluster, ClusterConfigEntity entity);
+ /**
+ * Creates a read-only instance of a {@link Config} suitable for returning in
+ * REST responses.
+ *
+ * @param type
+ * @param tag
+ * @param map
+ * @param mapAttributes
+ * @return
+ */
+ Config createReadOnly(@Assisted("type") String type, @Assisted("tag") String tag,
+ Map<String, String> map, Map<String, Map<String, String>> mapAttributes);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
index 1f52e6a..0a861d8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
@@ -18,27 +18,29 @@
package org.apache.ambari.server.state;
-import java.util.Collections;
-import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import javax.annotation.Nullable;
import org.apache.ambari.server.events.ClusterConfigChangedEvent;
import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.logging.LockFactory;
import org.apache.ambari.server.orm.dao.ClusterDAO;
import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.gson.Gson;
+import com.google.gson.JsonSyntaxException;
import com.google.inject.Inject;
-import com.google.inject.Injector;
import com.google.inject.assistedinject.Assisted;
import com.google.inject.assistedinject.AssistedInject;
import com.google.inject.persist.Transactional;
@@ -49,52 +51,113 @@ public class ConfigImpl implements Config {
*/
private final static Logger LOG = LoggerFactory.getLogger(ConfigImpl.class);
+ /**
+ * A label for {@link #hostLock} to use with the {@link LockFactory}.
+ */
+ private static final String PROPERTY_LOCK_LABEL = "configurationPropertyLock";
+
public static final String GENERATED_TAG_PREFIX = "generatedTag_";
- private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+ private final long configId;
+ private final Cluster cluster;
+ private final StackId stackId;
+ private final String type;
+ private final String tag;
+ private final Long version;
- private Cluster cluster;
- private StackId stackId;
- private String type;
- private volatile String tag;
- private volatile Long version;
- private volatile Map<String, String> properties;
- private volatile Map<String, Map<String, String>> propertiesAttributes;
- private ClusterConfigEntity entity;
- private volatile Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes;
+ /**
+ * The properties of this configuration. This cannot be a
+ * {@link ConcurrentMap} since we allow null values. Therefore, it must be
+ * synchronized externally.
+ */
+ private Map<String, String> properties;
- @Inject
- private ClusterDAO clusterDAO;
+ /**
+ * A lock for reading/writing of {@link #properties} concurrently.
+ *
+ * @see #properties
+ */
+ private final ReadWriteLock propertyLock;
- @Inject
- private Gson gson;
+ /**
+ * The property attributes for this configuration.
+ */
+ private Map<String, Map<String, String>> propertiesAttributes;
+
+ private Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes;
+
+ private final ClusterDAO clusterDAO;
+
+ private final Gson gson;
@Inject
private ServiceConfigDAO serviceConfigDAO;
- @Inject
- private AmbariEventPublisher eventPublisher;
+ private final AmbariEventPublisher eventPublisher;
@AssistedInject
- public ConfigImpl(@Assisted Cluster cluster, @Assisted String type, @Assisted Map<String, String> properties,
- @Assisted Map<String, Map<String, String>> propertiesAttributes, Injector injector) {
+ ConfigImpl(@Assisted Cluster cluster, @Assisted("type") String type,
+ @Assisted("tag") @Nullable String tag,
+ @Assisted Map<String, String> properties,
+ @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes, ClusterDAO clusterDAO,
+ Gson gson, AmbariEventPublisher eventPublisher, LockFactory lockFactory) {
+
+ propertyLock = lockFactory.newReadWriteLock(PROPERTY_LOCK_LABEL);
+
this.cluster = cluster;
this.type = type;
this.properties = properties;
- this.propertiesAttributes = propertiesAttributes;
+
+ // only set this if it's non-null
+ this.propertiesAttributes = null == propertiesAttributes ? null
+ : new HashMap<>(propertiesAttributes);
+
+ this.clusterDAO = clusterDAO;
+ this.gson = gson;
+ this.eventPublisher = eventPublisher;
+ version = cluster.getNextConfigVersion(type);
+
+ // tag is nullable from factory but not in the DB, so ensure we generate something
+ tag = StringUtils.isBlank(tag) ? GENERATED_TAG_PREFIX + version : tag;
+ this.tag = tag;
+
+ ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
+
+ ClusterConfigEntity entity = new ClusterConfigEntity();
+ entity.setClusterEntity(clusterEntity);
+ entity.setClusterId(cluster.getClusterId());
+ entity.setType(type);
+ entity.setVersion(version);
+ entity.setTag(this.tag);
+ entity.setTimestamp(System.currentTimeMillis());
+ entity.setStack(clusterEntity.getDesiredStack());
+ entity.setData(gson.toJson(properties));
+
+ if (null != propertiesAttributes) {
+ entity.setAttributes(gson.toJson(propertiesAttributes));
+ }
// when creating a brand new config without a backing entity, use the
// cluster's desired stack as the config's stack
stackId = cluster.getDesiredStackVersion();
-
- injector.injectMembers(this);
propertiesTypes = cluster.getConfigPropertiesTypes(type);
- }
+ persist(entity);
+ configId = entity.getConfigId();
+ }
@AssistedInject
- public ConfigImpl(@Assisted Cluster cluster, @Assisted ClusterConfigEntity entity, Injector injector) {
+ ConfigImpl(@Assisted Cluster cluster, @Assisted ClusterConfigEntity entity,
+ ClusterDAO clusterDAO, Gson gson, AmbariEventPublisher eventPublisher,
+ LockFactory lockFactory) {
+ propertyLock = lockFactory.newReadWriteLock(PROPERTY_LOCK_LABEL);
+
this.cluster = cluster;
+ this.clusterDAO = clusterDAO;
+ this.gson = gson;
+ this.eventPublisher = eventPublisher;
+ configId = entity.getConfigId();
+
type = entity.getType();
tag = entity.getTag();
version = entity.getVersion();
@@ -102,16 +165,71 @@ public class ConfigImpl implements Config {
// when using an existing entity, use the actual value of the entity's stack
stackId = new StackId(entity.getStack());
- this.entity = entity;
- injector.injectMembers(this);
propertiesTypes = cluster.getConfigPropertiesTypes(type);
+
+ // incur the hit on deserialization since this business object is stored locally
+ try {
+ Map<String, String> deserializedProperties = gson.<Map<String, String>> fromJson(
+ entity.getData(), Map.class);
+
+ if (null == deserializedProperties) {
+ deserializedProperties = new HashMap<>();
+ }
+
+ properties = deserializedProperties;
+ } catch (JsonSyntaxException e) {
+ LOG.error("Malformed configuration JSON stored in the database for {}/{}", entity.getType(),
+ entity.getTag());
+ }
+
+ // incur the hit on deserialization since this business object is stored locally
+ try {
+ Map<String, Map<String, String>> deserializedAttributes = gson.<Map<String, Map<String, String>>> fromJson(
+ entity.getAttributes(), Map.class);
+
+ if (null != deserializedAttributes) {
+ propertiesAttributes = new HashMap<>(deserializedAttributes);
+ }
+ } catch (JsonSyntaxException e) {
+ LOG.error("Malformed configuration attribute JSON stored in the database for {}/{}",
+ entity.getType(), entity.getTag());
+ }
}
/**
- * Constructor for clients not using factory.
+ * Constructor. This will create an instance suitable only for
+ * representation/serialization as it is incomplete.
+ *
+ * @param type
+ * @param tag
+ * @param properties
+ * @param propertiesAttributes
+ * @param clusterDAO
+ * @param gson
+ * @param eventPublisher
*/
- public ConfigImpl(String type) {
+ @AssistedInject
+ ConfigImpl(@Assisted("type") String type,
+ @Assisted("tag") @Nullable String tag,
+ @Assisted Map<String, String> properties,
+ @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes, ClusterDAO clusterDAO,
+ Gson gson, AmbariEventPublisher eventPublisher, LockFactory lockFactory) {
+
+ propertyLock = lockFactory.newReadWriteLock(PROPERTY_LOCK_LABEL);
+
+ this.tag = tag;
this.type = type;
+ this.properties = new HashMap<>(properties);
+ this.propertiesAttributes = null == propertiesAttributes ? null
+ : new HashMap<>(propertiesAttributes);
+ this.clusterDAO = clusterDAO;
+ this.gson = gson;
+ this.eventPublisher = eventPublisher;
+
+ cluster = null;
+ configId = 0;
+ version = 0L;
+ stackId = null;
}
/**
@@ -119,232 +237,124 @@ public class ConfigImpl implements Config {
*/
@Override
public StackId getStackId() {
- readWriteLock.readLock().lock();
- try {
- return stackId;
- } finally {
- readWriteLock.readLock().unlock();
- }
-
+ return stackId;
}
@Override
public Map<PropertyInfo.PropertyType, Set<String>> getPropertiesTypes() {
- readWriteLock.readLock().lock();
- try {
- return propertiesTypes;
- } finally {
- readWriteLock.readLock().unlock();
- }
+ return propertiesTypes;
}
@Override
public void setPropertiesTypes(Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes) {
- readWriteLock.writeLock().lock();
- try {
- this.propertiesTypes = propertiesTypes;
- } finally {
- readWriteLock.writeLock().unlock();
- }
- }
-
- @Override
- public void setStackId(StackId stackId) {
- readWriteLock.writeLock().lock();
- try {
- this.stackId = stackId;
- } finally {
- readWriteLock.writeLock().unlock();
- }
-
+ this.propertiesTypes = propertiesTypes;
}
@Override
public String getType() {
- readWriteLock.readLock().lock();
- try {
- return type;
- } finally {
- readWriteLock.readLock().unlock();
- }
-
+ return type;
}
@Override
public String getTag() {
- if (tag == null) {
- readWriteLock.writeLock().lock();
- try {
- if (tag == null) {
- tag = GENERATED_TAG_PREFIX + getVersion();
- }
- } finally {
- readWriteLock.writeLock().unlock();
- }
- }
-
- readWriteLock.readLock().lock();
- try {
-
- return tag;
- } finally {
- readWriteLock.readLock().unlock();
- }
-
+ return tag;
}
@Override
public Long getVersion() {
- if (version == null && cluster != null) {
- readWriteLock.writeLock().lock();
- try {
- if (version == null) {
- version = cluster.getNextConfigVersion(type); //pure DB calculation call, no cluster locking required
- }
- } finally {
- readWriteLock.writeLock().unlock();
- }
- }
-
- readWriteLock.readLock().lock();
- try {
- return version;
- } finally {
- readWriteLock.readLock().unlock();
- }
-
+ return version;
}
@Override
public Map<String, String> getProperties() {
- if (null != entity && null == properties) {
- readWriteLock.writeLock().lock();
- try {
- if (properties == null) {
- properties = gson.<Map<String, String>>fromJson(entity.getData(), Map.class);
- }
- } finally {
- readWriteLock.writeLock().unlock();
- }
- }
-
- readWriteLock.readLock().lock();
+ propertyLock.readLock().lock();
try {
- return null == properties ? new HashMap<String, String>()
- : new HashMap<String, String>(properties);
+ return properties == null ? new HashMap<String, String>() : new HashMap<>(properties);
} finally {
- readWriteLock.readLock().unlock();
+ propertyLock.readLock().unlock();
}
-
}
@Override
public Map<String, Map<String, String>> getPropertiesAttributes() {
- if (null != entity && null == propertiesAttributes) {
- readWriteLock.writeLock().lock();
- try {
- if (propertiesAttributes == null) {
- propertiesAttributes = gson.<Map<String, Map<String, String>>>fromJson(entity.getAttributes(), Map.class);
- }
- } finally {
- readWriteLock.writeLock().unlock();
- }
- }
-
- readWriteLock.readLock().lock();
- try {
- return null == propertiesAttributes ? null : new HashMap<String, Map<String, String>>(propertiesAttributes);
- } finally {
- readWriteLock.readLock().unlock();
- }
-
- }
-
- @Override
- public void setTag(String tag) {
- readWriteLock.writeLock().lock();
- try {
- this.tag = tag;
- } finally {
- readWriteLock.writeLock().unlock();
- }
-
- }
-
- @Override
- public void setVersion(Long version) {
- readWriteLock.writeLock().lock();
- try {
- this.version = version;
- } finally {
- readWriteLock.writeLock().unlock();
- }
-
+ return null == propertiesAttributes ? null
+ : new HashMap<String, Map<String, String>>(propertiesAttributes);
}
@Override
public void setProperties(Map<String, String> properties) {
- readWriteLock.writeLock().lock();
+ propertyLock.writeLock().lock();
try {
this.properties = properties;
} finally {
- readWriteLock.writeLock().unlock();
+ propertyLock.writeLock().unlock();
}
-
}
@Override
public void setPropertiesAttributes(Map<String, Map<String, String>> propertiesAttributes) {
- readWriteLock.writeLock().lock();
- try {
- this.propertiesAttributes = propertiesAttributes;
- } finally {
- readWriteLock.writeLock().unlock();
- }
-
+ this.propertiesAttributes = propertiesAttributes;
}
@Override
- public void updateProperties(Map<String, String> properties) {
- readWriteLock.writeLock().lock();
+ public void updateProperties(Map<String, String> propertiesToUpdate) {
+ propertyLock.writeLock().lock();
try {
- this.properties.putAll(properties);
+ properties.putAll(propertiesToUpdate);
} finally {
- readWriteLock.writeLock().unlock();
+ propertyLock.writeLock().unlock();
}
-
}
@Override
public List<Long> getServiceConfigVersions() {
- readWriteLock.readLock().lock();
- try {
- if (cluster == null || type == null || version == null) {
- return Collections.emptyList();
- }
- return serviceConfigDAO.getServiceConfigVersionsByConfig(cluster.getClusterId(), type, version);
- } finally {
- readWriteLock.readLock().unlock();
- }
-
+ return serviceConfigDAO.getServiceConfigVersionsByConfig(cluster.getClusterId(), type, version);
}
@Override
- public void deleteProperties(List<String> properties) {
- readWriteLock.writeLock().lock();
+ public void deleteProperties(List<String> propertyKeysToRemove) {
+ propertyLock.writeLock().lock();
try {
- for (String key : properties) {
- this.properties.remove(key);
- }
+ Set<String> keySet = properties.keySet();
+ keySet.removeAll(propertyKeysToRemove);
} finally {
- readWriteLock.writeLock().unlock();
+ propertyLock.writeLock().unlock();
}
+ }
+
+ /**
+ * Persist the entity and update the internal state relationships once the
+ * transaction has been committed.
+ */
+ private void persist(ClusterConfigEntity entity) {
+ persistEntitiesInTransaction(entity);
+ // ensure that the in-memory state of the cluster is kept consistent
+ cluster.addConfig(this);
+
+ // re-load the entity associations for the cluster
+ cluster.refresh();
+
+ // broadcast the change event for the configuration
+ ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
+ getType(), getTag(), getVersion());
+
+ eventPublisher.publish(event);
}
- @Override
- public void persist() {
- persist(true);
+ /**
+ * Persist the cluster and configuration entities in their own transaction.
+ */
+ @Transactional
+ void persistEntitiesInTransaction(ClusterConfigEntity entity) {
+ ClusterEntity clusterEntity = entity.getClusterEntity();
+
+ clusterDAO.createConfig(entity);
+ clusterEntity.getClusterConfigEntities().add(entity);
+
+ // save the entity, forcing a flush to ensure the refresh picks up the
+ // newest data
+ clusterDAO.merge(clusterEntity, true);
}
/**
@@ -352,69 +362,29 @@ public class ConfigImpl implements Config {
*/
@Override
@Transactional
- public void persist(boolean newConfig) {
- readWriteLock.writeLock().lock();
- try {
- ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
-
- if (newConfig) {
- ClusterConfigEntity entity = new ClusterConfigEntity();
- entity.setClusterEntity(clusterEntity);
- entity.setClusterId(cluster.getClusterId());
- entity.setType(getType());
- entity.setVersion(getVersion());
- entity.setTag(getTag());
- entity.setTimestamp(new Date().getTime());
- entity.setStack(clusterEntity.getDesiredStack());
- entity.setData(gson.toJson(getProperties()));
-
- if (null != getPropertiesAttributes()) {
- entity.setAttributes(gson.toJson(getPropertiesAttributes()));
- }
-
- clusterDAO.createConfig(entity);
- clusterEntity.getClusterConfigEntities().add(entity);
-
- // save the entity, forcing a flush to ensure the refresh picks up the
- // newest data
- clusterDAO.merge(clusterEntity, true);
- } else {
- // only supporting changes to the properties
- ClusterConfigEntity entity = null;
-
- // find the existing configuration to update
- for (ClusterConfigEntity cfe : clusterEntity.getClusterConfigEntities()) {
- if (getTag().equals(cfe.getTag()) && getType().equals(cfe.getType())
- && getVersion().equals(cfe.getVersion())) {
- entity = cfe;
- break;
- }
- }
-
- // if the configuration was found, then update it
- if (null != entity) {
- LOG.debug(
- "Updating {} version {} with new configurations; a new version will not be created",
- getType(), getVersion());
-
- entity.setData(gson.toJson(getProperties()));
-
- // save the entity, forcing a flush to ensure the refresh picks up the
- // newest data
- clusterDAO.merge(clusterEntity, true);
- }
- }
- } finally {
- readWriteLock.writeLock().unlock();
- }
+ public void save() {
+ ClusterConfigEntity entity = clusterDAO.findConfig(configId);
+ ClusterEntity clusterEntity = clusterDAO.findById(entity.getClusterId());
- // re-load the entity associations for the cluster
- cluster.refresh();
+ // if the configuration was found, then update it
+ if (null != entity) {
+ LOG.debug("Updating {} version {} with new configurations; a new version will not be created",
+ getType(), getVersion());
- // broadcast the change event for the configuration
- ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
- getType(), getTag(), getVersion());
+ entity.setData(gson.toJson(getProperties()));
+
+ // save the entity, forcing a flush to ensure the refresh picks up the
+ // newest data
+ clusterDAO.merge(clusterEntity, true);
+
+ // re-load the entity associations for the cluster
+ cluster.refresh();
+
+ // broadcast the change event for the configuration
+ ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
+ getType(), getTag(), getVersion());
eventPublisher.publish(event);
+ }
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
index f87b99c..e223eed 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
@@ -451,7 +451,7 @@ public class ServiceImpl implements Service {
}
@Transactional
- private void persistEntities(ClusterServiceEntity serviceEntity) {
+ void persistEntities(ClusterServiceEntity serviceEntity) {
long clusterId = cluster.getClusterId();
ClusterEntity clusterEntity = clusterDAO.findById(clusterId);
serviceEntity.setClusterEntity(clusterEntity);
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 7bf24ce..b62c834 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -22,6 +22,7 @@ import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.Date;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
@@ -144,8 +145,10 @@ import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.base.Functions;
import com.google.common.base.Predicate;
import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Maps;
@@ -326,8 +329,11 @@ public class ClusterImpl implements Cluster {
loadStackVersion();
loadServices();
loadServiceHostComponents();
- loadConfigGroups();
+
+ // cache configurations before loading configuration groups
cacheConfigurations();
+ loadConfigGroups();
+
loadRequestExecutions();
if (desiredStackVersion != null && !StringUtils.isEmpty(desiredStackVersion.getStackName()) && !
@@ -2566,7 +2572,6 @@ public class ClusterImpl implements Cluster {
}
}
configGroup.setHosts(groupDesiredHosts);
- configGroup.persist();
} else {
throw new IllegalArgumentException("Config group {} doesn't exist");
}
@@ -3070,6 +3075,7 @@ public class ClusterImpl implements Cluster {
* {@inheritDoc}
*/
@Override
+ @Transactional
public void applyLatestConfigurations(StackId stackId) {
clusterGlobalLock.writeLock().lock();
@@ -3077,36 +3083,33 @@ public class ClusterImpl implements Cluster {
ClusterEntity clusterEntity = getClusterEntity();
Collection<ClusterConfigMappingEntity> configMappingEntities = clusterEntity.getConfigMappingEntities();
+ // hash them for easier retrieval later - these are the same entity
+ // instances which exist on the cluster entity, so modification of the CCM
+ // entity here will affect the cluster CCM entities as well
+ ImmutableMap<Object, ClusterConfigMappingEntity> ccmMap = Maps.uniqueIndex(configMappingEntities, Functions.identity());
+
// disable all configs
for (ClusterConfigMappingEntity e : configMappingEntities) {
LOG.debug("{} with tag {} is unselected", e.getType(), e.getTag());
e.setSelected(0);
}
- List<ClusterConfigMappingEntity> clusterConfigMappingsForStack = clusterDAO.getClusterConfigMappingsByStack(
+ // work through the in-memory list, finding only the most recent mapping per type
+ Collection<ClusterConfigMappingEntity> latestConfigMappingByStack = getLatestConfigMappingsForStack(
clusterEntity.getClusterId(), stackId);
- Collection<ClusterConfigMappingEntity> latestConfigMappingByStack = getLatestConfigMapping(
- clusterConfigMappingsForStack);
-
- // loop through all configs and set the latest to enabled for the
- // specified stack
- for(ClusterConfigMappingEntity configMappingEntity: configMappingEntities){
- String type = configMappingEntity.getType();
- String tag = configMappingEntity.getTag();
+ for( ClusterConfigMappingEntity latestConfigMapping : latestConfigMappingByStack ){
+ ClusterConfigMappingEntity mapping = ccmMap.get(latestConfigMapping);
+ mapping.setSelected(1);
- for (ClusterConfigMappingEntity latest : latestConfigMappingByStack) {
- String latestType = latest.getType();
- String latestTag = latest.getTag();
-
- // find the latest config of a given mapping entity
- if (StringUtils.equals(type, latestType) && StringUtils.equals(tag, latestTag)) {
- LOG.info("{} with version tag {} is selected for stack {}", type, tag, stackId.toString());
- configMappingEntity.setSelected(1);
- }
- }
+ LOG.info("Setting {} with version tag {} created on {} to selected for stack {}",
+ mapping.getType(), mapping.getTag(), new Date(mapping.getCreateTimestamp()),
+ stackId.toString());
}
+ // since the entities which were modified came from the cluster entity's
+ // list to begin with, we can just save them right back - no need for a
+ // new collection since the CCM entity instances were modified directly
clusterEntity.setConfigMappingEntities(configMappingEntities);
clusterEntity = clusterDAO.merge(clusterEntity);
clusterDAO.mergeConfigMappings(configMappingEntities);
@@ -3128,23 +3131,60 @@ public class ClusterImpl implements Cluster {
jpaEventPublisher.publish(event);
}
- public Collection<ClusterConfigMappingEntity> getLatestConfigMapping(List<ClusterConfigMappingEntity> clusterConfigMappingEntities){
- Map<String, ClusterConfigMappingEntity> temp = new HashMap<String, ClusterConfigMappingEntity>();
- for (ClusterConfigMappingEntity e : clusterConfigMappingEntities) {
- String type = e.getType();
- if(temp.containsKey(type)){
- ClusterConfigMappingEntity entityStored = temp.get(type);
- Long timestampStored = entityStored.getCreateTimestamp();
- Long timestamp = e.getCreateTimestamp();
- if(timestamp > timestampStored){
- temp.put(type, e); //find a newer config for the given type
- }
- } else {
- temp.put(type, e); //first time encounter a type, add it
+ /**
+ * Retrieves all of the configuration mappings (selected and unselected) for
+ * the specified stack and then iterates through them, returning the most
+ * recent mapping for every type/tag combination.
+ * <p/>
+ * Because of how configuration revert works, mappings can be created for the
+ * same type/tag combinations. The only difference is that the timestamp
+ * reflects when each mapping was created.
+ * <p/>
+ * JPQL cannot be used directly here easily because some databases cannot
+ * support the necessary grouping and IN clause. For example: <br/>
+ *
+ * <pre>
+ * SELECT mapping FROM clusterconfigmappingentity mapping
+ * WHERE (mapping.typename, mapping.createtimestamp) IN
+ * (SELECT latest.typename, MAX(latest.createtimestamp)
+ * FROM clusterconfigmappingentity latest
+ * GROUP BY latest.typename)
+ * </pre>
+ *
+ * @param clusterId
+ * the cluster ID
+ * @param stackId
+ * the stack to retrieve the mappings for (not {@code null}).
+ * @return the most recent mapping (selected or unselected) for the specified
+ * stack for every type.
+ */
+ public Collection<ClusterConfigMappingEntity> getLatestConfigMappingsForStack(long clusterId,
+ StackId stackId) {
+
+ // get all mappings for the specified stack (which could include
+ // duplicates since a config revert creates a duplicate mapping with a
+ // different timestamp)
+ List<ClusterConfigMappingEntity> clusterConfigMappingsForStack = clusterDAO.getClusterConfigMappingsByStack(
+ clusterId, stackId);
+
+ Map<String, ClusterConfigMappingEntity> latestMappingsByType = new HashMap<String, ClusterConfigMappingEntity>();
+ for (ClusterConfigMappingEntity mapping : clusterConfigMappingsForStack) {
+ String type = mapping.getType();
+
+ if (!latestMappingsByType.containsKey(type)) {
+ latestMappingsByType.put(type, mapping);
+ continue;
+ }
+
+ ClusterConfigMappingEntity entityStored = latestMappingsByType.get(type);
+ Long timestampStored = entityStored.getCreateTimestamp();
+ Long timestamp = mapping.getCreateTimestamp();
+ if (timestamp > timestampStored) {
+ latestMappingsByType.put(type, mapping);
}
}
- return temp.values();
+ return latestMappingsByType.values();
}
/**
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
index 8e5f37e..1de10f9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
@@ -183,7 +183,7 @@ public class ClustersImpl implements Clusters {
*/
@Inject
@Transactional
- private void loadClustersAndHosts() {
+ void loadClustersAndHosts() {
List<HostEntity> hostEntities = hostDAO.findAll();
for (HostEntity hostEntity : hostEntities) {
Host host = hostFactory.create(hostEntity);
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
index 1b29c9b..5a9c574 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
@@ -18,13 +18,13 @@
package org.apache.ambari.server.state.configgroup;
+import java.util.Map;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.controller.ConfigGroupResponse;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.Host;
-import java.util.Map;
-
/**
* Configuration group or Config group is a type of Ambari resource that
* supports grouping of configuration resources and host resources for a
@@ -80,29 +80,20 @@ public interface ConfigGroup {
public void setDescription(String description);
/**
- * List of hosts to which configs are applied
+ * Gets an unmodifiable map of {@link Host}s keyed by host ID.
+ *
* @return
*/
public Map<Long, Host> getHosts();
/**
- * List of @Config objects
+ * Gets an unmodifiable map of {@link Config}s.
+ *
* @return
*/
public Map<String, Config> getConfigurations();
/**
- * Persist the Config group along with the related host and config mapping
- * entities to the persistence store
- */
- void persist();
-
- /**
- * Persist the host mapping entity to the persistence store
- */
- void persistHostMapping();
-
- /**
* Delete config group and the related host and config mapping
* entities from the persistence store
*/
@@ -116,13 +107,6 @@ public interface ConfigGroup {
public void addHost(Host host) throws AmbariException;
/**
- * Add config to the config group
- * @param config
- * @throws AmbariException
- */
- public void addConfiguration(Config config) throws AmbariException;
-
- /**
* Return @ConfigGroupResponse for the config group
*
* @return @ConfigGroupResponse
@@ -131,11 +115,6 @@ public interface ConfigGroup {
public ConfigGroupResponse convertToResponse() throws AmbariException;
/**
- * Refresh Config group and the host and config mappings for the group
- */
- public void refresh();
-
- /**
* Reassign the set of hosts associated with this config group
* @param hosts
*/
http://git-wip-us.apache.org/repos/asf/ambari/blob/4278c4a4/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
index 9abadf3..906d948 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
@@ -17,22 +17,38 @@
*/
package org.apache.ambari.server.state.configgroup;
-import com.google.inject.assistedinject.Assisted;
+import java.util.Map;
+
import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.configgroup.ConfigGroup;
-import java.util.Map;
+import com.google.inject.assistedinject.Assisted;
public interface ConfigGroupFactory {
- ConfigGroup createNew(@Assisted("cluster") Cluster cluster,
- @Assisted("name") String name,
- @Assisted("tag") String tag,
- @Assisted("description") String description,
- @Assisted("configs") Map<String, Config> configs,
- @Assisted("hosts") Map<Long, Host> hosts);
+ /**
+ * Creates and saves a new {@link ConfigGroup}.
+ *
+ * @param cluster
+ * @param name
+ * @param tag
+ * @param description
+ * @param configs
+ * @param hosts
+ * @param serviceName
+ * @return
+ */
+ ConfigGroup createNew(@Assisted("cluster") Cluster cluster, @Assisted("name") String name,
+ @Assisted("tag") String tag, @Assisted("description") String description,
+ @Assisted("configs") Map<String, Config> configs, @Assisted("hosts") Map<Long, Host> hosts);
+ /**
+ * Instantiates a {@link ConfigGroup} from an existing, persisted entity.
+ *
+ * @param cluster
+ * @param entity
+ * @return
+ */
ConfigGroup createExisting(Cluster cluster, ConfigGroupEntity entity);
}