You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by dm...@apache.org on 2017/09/15 17:07:08 UTC
[1/2] ambari git commit: AMBARI-21935. Auto fix enhancement to remove
more than 1 selected configs (dlysnichenko)
Repository: ambari
Updated Branches:
refs/heads/branch-2.6 3b55821fc -> e835ca8a7
refs/heads/trunk fed32b85a -> d8e621e5e
AMBARI-21935. Auto fix enhancement to remove more than 1 selected configs (dlysnichenko)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e835ca8a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e835ca8a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e835ca8a
Branch: refs/heads/branch-2.6
Commit: e835ca8a78056cdc7db7cc68998c38e08c638222
Parents: 3b55821
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Fri Sep 15 16:35:28 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Fri Sep 15 20:04:55 2017 +0300
----------------------------------------------------------------------
.../checks/DatabaseConsistencyCheckHelper.java | 135 ++++++++++++++++---
.../ambari/server/orm/dao/ClusterDAO.java | 22 ++-
.../orm/entities/ClusterConfigEntity.java | 3 +
.../DatabaseConsistencyCheckHelperTest.java | 86 +++++++++++-
4 files changed, 221 insertions(+), 25 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/e835ca8a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
index 575485b..1f94bae 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
@@ -97,6 +97,10 @@ public class DatabaseConsistencyCheckHelper {
private static DBAccessor dbAccessor;
private static DatabaseConsistencyCheckResult checkResult = DatabaseConsistencyCheckResult.DB_CHECK_SUCCESS;
+ public static final String GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY = "select c.cluster_name, cc.type_name from clusterconfig cc " +
+ "join clusters c on cc.cluster_id=c.cluster_id " +
+ "group by c.cluster_name, cc.type_name " +
+ "having sum(cc.selected) > 1";
/**
@@ -183,6 +187,7 @@ public class DatabaseConsistencyCheckHelper {
fixClusterConfigsNotMappedToAnyService();
fixConfigGroupHostMappings();
fixConfigGroupsForDeletedServices();
+ fixConfigsSelectedMoreThanOnce();
}
checkSchemaName();
checkMySQLEngine();
@@ -329,7 +334,7 @@ public class DatabaseConsistencyCheckHelper {
warning("Unable to get size for table {}!", tableName);
}
} catch (SQLException ex) {
- error(String.format("Failed to get %s row count: ", tableName), e);
+ warning(String.format("Failed to get %s row count: ", tableName), e);
}
} finally {
if (rs != null) {
@@ -361,10 +366,6 @@ public class DatabaseConsistencyCheckHelper {
static void checkForConfigsSelectedMoreThanOnce() {
LOG.info("Checking for configs selected more than once");
- String GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY = "select c.cluster_name, cc.type_name from clusterconfig cc " +
- "join clusters c on cc.cluster_id=c.cluster_id " +
- "group by c.cluster_name, cc.type_name " +
- "having sum(cc.selected) > 1";
Multimap<String, String> clusterConfigTypeMap = HashMultimap.create();
ResultSet rs = null;
Statement statement = null;
@@ -386,7 +387,56 @@ public class DatabaseConsistencyCheckHelper {
}
} catch (SQLException e) {
- error("Exception occurred during check for config selected more than once procedure: ", e);
+ warning("Exception occurred during check for config selected more than once procedure: ", e);
+ } finally {
+ if (rs != null) {
+ try {
+ rs.close();
+ } catch (SQLException e) {
+ LOG.error("Exception occurred during result set closing procedure: ", e);
+ }
+ }
+
+ if (statement != null) {
+ try {
+ statement.close();
+ } catch (SQLException e) {
+ LOG.error("Exception occurred during statement closing procedure: ", e);
+ }
+ }
+ }
+ }
+
+ /**
+ * Fix inconsistencies found by {@code checkForConfigsSelectedMoreThanOnce}
+ * by keeping selected only the config with the latest selectedTimestamp
+ */
+ @Transactional
+ static void fixConfigsSelectedMoreThanOnce() {
+ LOG.info("Fix configs selected more than once");
+ ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
+
+ Clusters clusters = injector.getInstance(Clusters.class);
+ Map<String, Cluster> clusterMap = clusters.getClusters();
+
+
+ Multimap<String, String> clusterConfigTypeMap = HashMultimap.create();
+ ResultSet rs = null;
+ Statement statement = null;
+
+ ensureConnection();
+
+ try {
+ statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE);
+ rs = statement.executeQuery(GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY);
+ if (rs != null) {
+ while (rs.next()) {
+ clusterConfigTypeMap.put(rs.getString("cluster_name"), rs.getString("type_name"));
+ }
+ }
+
+ } catch (SQLException e) {
+ warning("Exception occurred during check for config selected more than once procedure: ", e);
} finally {
if (rs != null) {
try {
@@ -404,6 +454,47 @@ public class DatabaseConsistencyCheckHelper {
}
}
}
+
+ for (String clusterName : clusterConfigTypeMap.keySet()) {
+ Cluster cluster = null;
+ try {
+ cluster = clusters.getCluster(clusterName);
+
+ Collection<String> typesWithMultipleSelectedConfigs = clusterConfigTypeMap.get(clusterName);
+
+ for (String type: typesWithMultipleSelectedConfigs) {
+ List<ClusterConfigEntity> enabledConfigsByType = getEnabledConfigsByType(cluster.getClusterId(), type);
+ ClusterConfigEntity latestConfig = enabledConfigsByType.get(0);
+ for (ClusterConfigEntity entity : enabledConfigsByType){
+ entity.setSelected(false);
+ if (latestConfig.getSelectedTimestamp() < entity.getSelectedTimestamp()){
+ latestConfig = entity;
+ }
+ clusterDAO.merge(entity, true);
+ }
+ latestConfig.setSelected(true);
+ clusterDAO.merge(latestConfig, true);
+ }
+ } catch (AmbariException e) {
+ warning("Exception occurred during fix for config selected more than once procedure: ", e);
+ }
+ }
+ }
+
+ /**
+ * Find ClusterConfigs with selected = 1 for the given cluster and config type
+ * @return the currently enabled (selected) ClusterConfigEntity rows of the given type
+ */
+ private static List<ClusterConfigEntity> getEnabledConfigsByType(long clusterId, String type) {
+
+ Provider<EntityManager> entityManagerProvider = injector.getProvider(EntityManager.class);
+ EntityManager entityManager = entityManagerProvider.get();
+
+ Query query = entityManager.createNamedQuery("ClusterConfigEntity.findEnabledConfigByType",ClusterConfigEntity.class);
+ query.setParameter("clusterId", clusterId);
+ query.setParameter("type", type);
+
+ return (List<ClusterConfigEntity>) query.getResultList();
}
/**
@@ -430,12 +521,12 @@ public class DatabaseConsistencyCheckHelper {
}
if (!hostsWithoutStatus.isEmpty()) {
- error("You have host(s) without state (in hoststate table): " + StringUtils.join(hostsWithoutStatus, ","));
+ warning("You have host(s) without state (in hoststate table): " + StringUtils.join(hostsWithoutStatus, ","));
}
}
} catch (SQLException e) {
- error("Exception occurred during check for host without state procedure: ", e);
+ warning("Exception occurred during check for host without state procedure: ", e);
} finally {
if (rs != null) {
try {
@@ -490,7 +581,7 @@ public class DatabaseConsistencyCheckHelper {
int topologyRequestTablesJoinedCount = runQuery(statement, SELECT_JOINED_COUNT_QUERY);
if (topologyRequestCount != topologyRequestTablesJoinedCount) {
- error("Your topology request hierarchy is not complete for each row in topology_request should exist " +
+ warning("Your topology request hierarchy is not complete for each row in topology_request should exist " +
"at least one row in topology_logical_request");
}
@@ -498,12 +589,12 @@ public class DatabaseConsistencyCheckHelper {
int topologyHostRequestTablesJoinedCount = runQuery(statement, SELECT_HOST_JOINED_COUNT_QUERY);
if (topologyHostRequestCount != topologyHostRequestTablesJoinedCount) {
- error("Your topology request hierarchy is not complete for each row in topology_host_request should exist " +
+ warning("Your topology request hierarchy is not complete for each row in topology_host_request should exist " +
"at least one row in topology_host_task, topology_logical_task.");
}
} catch (SQLException e) {
- error("Exception occurred during topology request tables check: ", e);
+ warning("Exception occurred during topology request tables check: ", e);
} finally {
if (statement != null) {
try {
@@ -529,7 +620,7 @@ public class DatabaseConsistencyCheckHelper {
}
} catch (SQLException e) {
- error("Exception occurred during topology request tables check: ", e);
+ warning("Exception occurred during topology request tables check: ", e);
} finally {
if (rs != null) {
try {
@@ -589,10 +680,10 @@ public class DatabaseConsistencyCheckHelper {
}
if (hostComponentStateCount != hostComponentDesiredStateCount || hostComponentStateCount != mergedCount) {
- error("Your host component states (hostcomponentstate table) count not equals host component desired states (hostcomponentdesiredstate table) count!");
+ warning("Your host component states (hostcomponentstate table) count not equals host component desired states (hostcomponentdesiredstate table) count!");
}
} catch (SQLException e) {
- error("Exception occurred during check for same count of host component states and host component desired states: ", e);
+ warning("Exception occurred during check for same count of host component states and host component desired states: ", e);
} finally {
if (rs != null) {
try {
@@ -822,11 +913,11 @@ public class DatabaseConsistencyCheckHelper {
tablesInfo.add(rs.getString("TABLE_NAME"));
}
if (!tablesInfo.isEmpty()){
- error("Found tables with engine type that is not InnoDB : {}", tablesInfo);
+ warning("Found tables with engine type that is not InnoDB : {}", tablesInfo);
}
}
} catch (SQLException e) {
- error("Exception occurred during checking MySQL engine to be innodb: ", e);
+ warning("Exception occurred during checking MySQL engine to be innodb: ", e);
} finally {
if (rs != null) {
try {
@@ -844,7 +935,7 @@ public class DatabaseConsistencyCheckHelper {
* 2) Check if service has no mapped configs to it's service config id.
* 3) Check if service has all required configs mapped to it.
* 4) Check if service has config which is not selected(has no actual config version) in clusterconfig table.
- * If any issue was discovered, we are showing error message for user.
+ * If any issue was discovered, we show a warning message to the user.
* */
static void checkServiceConfigs() {
LOG.info("Checking services and their configs");
@@ -923,7 +1014,7 @@ public class DatabaseConsistencyCheckHelper {
for (String clName : clusterServiceVersionMap.keySet()) {
Multimap<String, String> serviceVersion = clusterServiceVersionMap.get(clName);
for (String servName : serviceVersion.keySet()) {
- error("In cluster {}, service config mapping is unavailable (in table serviceconfigmapping) for service {} with version(s) {}! ", clName, servName, StringUtils.join(serviceVersion.get(servName), ","));
+ warning("In cluster {}, service config mapping is unavailable (in table serviceconfigmapping) for service {} with version(s) {}! ", clName, servName, StringUtils.join(serviceVersion.get(servName), ","));
}
}
@@ -1034,7 +1125,7 @@ public class DatabaseConsistencyCheckHelper {
}
if (!serviceConfigsFromStack.isEmpty()) {
- error("Required config(s): {} is(are) not available for service {} with service config version {} in cluster {}",
+ warning("Required config(s): {} is(are) not available for service {} with service config version {} in cluster {}",
StringUtils.join(serviceConfigsFromStack, ","), serviceName, Integer.toString(serviceVersion), clusterName);
}
}
@@ -1072,13 +1163,13 @@ public class DatabaseConsistencyCheckHelper {
for (String clusterName : clusterServiceConfigType.keySet()) {
Multimap<String, String> serviceConfig = clusterServiceConfigType.get(clusterName);
for (String serviceName : serviceConfig.keySet()) {
- error("You have non selected configs: {} for service {} from cluster {}!", StringUtils.join(serviceConfig.get(serviceName), ","), serviceName, clusterName);
+ warning("You have non selected configs: {} for service {} from cluster {}!", StringUtils.join(serviceConfig.get(serviceName), ","), serviceName, clusterName);
}
}
} catch (SQLException e) {
- error("Exception occurred during complex service check procedure: ", e);
+ warning("Exception occurred during complex service check procedure: ", e);
} catch (AmbariException e) {
- error("Exception occurred during complex service check procedure: ", e);
+ warning("Exception occurred during complex service check procedure: ", e);
} finally {
if (rs != null) {
try {
http://git-wip-us.apache.org/repos/asf/ambari/blob/e835ca8a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
index a23b914..7dec445 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
@@ -341,9 +341,29 @@ public class ClusterDAO {
* the entity to merge (not {@code null}).
* @return the managed entity which was merged (never {@code null}).
*/
+ @Transactional
public ClusterConfigEntity merge(ClusterConfigEntity clusterConfigEntity) {
+ return merge(clusterConfigEntity, false);
+ }
+
+ /**
+ * Merge the specified entity into the current persistence context.
+ *
+ * @param clusterConfigEntity
+ * the entity to merge (not {@code null}).
+ * @param flush
+ * if {@code true} then {@link EntityManager#flush()} will be invoked
+ * immediately after the merge.
+ * @return the managed entity which was merged (never {@code null}).
+ */
+ @Transactional
+ public ClusterConfigEntity merge(ClusterConfigEntity clusterConfigEntity, boolean flush) {
EntityManager entityManager = entityManagerProvider.get();
- return entityManager.merge(clusterConfigEntity);
+ ClusterConfigEntity clusterConfigEntityRes = entityManager.merge(clusterConfigEntity);
+ if(flush) {
+ entityManager.flush();
+ }
+ return clusterConfigEntityRes;
}
@Transactional
http://git-wip-us.apache.org/repos/asf/ambari/blob/e835ca8a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
index 9bf03b3..da944f5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
@@ -71,6 +71,9 @@ import org.apache.commons.lang.builder.EqualsBuilder;
name = "ClusterConfigEntity.findEnabledConfigByType",
query = "SELECT config FROM ClusterConfigEntity config WHERE config.clusterId = :clusterId AND config.selected = 1 and config.type = :type"),
@NamedQuery(
+ name = "ClusterConfigEntity.findEnabledConfigs",
+ query = "SELECT config FROM ClusterConfigEntity config WHERE config.clusterId = :clusterId AND config.selected = 1"),
+ @NamedQuery(
name = "ClusterConfigEntity.findEnabledConfigsByTypes",
query = "SELECT config FROM ClusterConfigEntity config WHERE config.clusterId = :clusterId AND config.selected = 1 and config.type in :types") })
http://git-wip-us.apache.org/repos/asf/ambari/blob/e835ca8a/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
index 6c6c00f..23dd1e6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
@@ -19,6 +19,9 @@ package org.apache.ambari.server.checks;
import static com.google.common.collect.Lists.newArrayList;
+import static org.easymock.EasyMock.anyBoolean;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.anyString;
import static org.easymock.EasyMock.createNiceMock;
import static org.easymock.EasyMock.createStrictMock;
@@ -33,16 +36,20 @@ import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
+import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.configuration.Configuration;
import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
import org.apache.ambari.server.stack.StackManagerFactory;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
@@ -155,13 +162,13 @@ public class DatabaseConsistencyCheckHelperTest {
@Test
public void testCheckTopologyTablesAreConsistent() throws Exception {
testCheckTopologyTablesConsistent(2);
- Assert.assertFalse(DatabaseConsistencyCheckHelper.getLastCheckResult().isError());
+ Assert.assertFalse(DatabaseConsistencyCheckHelper.getLastCheckResult().isErrorOrWarning());
}
@Test
public void testCheckTopologyTablesAreNotConsistent() throws Exception {
testCheckTopologyTablesConsistent(1);
- Assert.assertTrue(DatabaseConsistencyCheckHelper.getLastCheckResult().isError());
+ Assert.assertTrue(DatabaseConsistencyCheckHelper.getLastCheckResult().isErrorOrWarning());
}
private void testCheckTopologyTablesConsistent(int resultCount) throws Exception {
@@ -747,4 +754,79 @@ public class DatabaseConsistencyCheckHelperTest {
Assert.assertEquals(1, configGroups.size());
Assert.assertEquals(1L, configGroups.values().iterator().next().getId().longValue());
}
+
+ @Test
+ public void testFixConfigsSelectedMoreThanOnce() throws Exception {
+ EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+ final Connection mockConnection = easyMockSupport.createNiceMock(Connection.class);
+ final ClusterDAO clusterDAO = easyMockSupport.createNiceMock(ClusterDAO.class);
+ final DBAccessor mockDBDbAccessor = easyMockSupport.createNiceMock(DBAccessor.class);
+
+ final EntityManager mockEntityManager = easyMockSupport.createNiceMock(EntityManager.class);
+ final Clusters mockClusters = easyMockSupport.createNiceMock(Clusters.class);
+ final ResultSet mockResultSet = easyMockSupport.createNiceMock(ResultSet.class);
+ final Statement mockStatement = easyMockSupport.createNiceMock(Statement.class);
+
+ final StackManagerFactory mockStackManagerFactory = easyMockSupport.createNiceMock(StackManagerFactory.class);
+ final OsFamily mockOSFamily = easyMockSupport.createNiceMock(OsFamily.class);
+
+ final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(EntityManager.class).toInstance(mockEntityManager);
+ bind(Clusters.class).toInstance(mockClusters);
+ bind(ClusterDAO.class).toInstance(clusterDAO);
+ bind(DBAccessor.class).toInstance(mockDBDbAccessor);
+ bind(StackManagerFactory.class).toInstance(mockStackManagerFactory);
+ bind(OsFamily.class).toInstance(mockOSFamily);
+ }
+ });
+
+
+ expect(mockConnection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE)).andReturn(mockStatement);
+ expect(mockStatement.executeQuery("select c.cluster_name, cc.type_name from clusterconfig cc " +
+ "join clusters c on cc.cluster_id=c.cluster_id " +
+ "group by c.cluster_name, cc.type_name " +
+ "having sum(cc.selected) > 1")).andReturn(mockResultSet);
+ expect(mockResultSet.next()).andReturn(true).once();
+ expect(mockResultSet.getString("cluster_name")).andReturn("123").once();
+ expect(mockResultSet.getString("type_name")).andReturn("type1").once();
+ expect(mockResultSet.next()).andReturn(false).once();
+
+ Cluster clusterMock = easyMockSupport.createNiceMock(Cluster.class);
+ expect(mockClusters.getCluster("123")).andReturn(clusterMock);
+
+ expect(clusterMock.getClusterId()).andReturn(123L).once();
+
+ ClusterConfigEntity clusterConfigEntity1 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
+ ClusterConfigEntity clusterConfigEntity2 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
+ expect(clusterConfigEntity1.getType()).andReturn("type1").anyTimes();
+ expect(clusterConfigEntity1.getSelectedTimestamp()).andReturn(123L);
+ clusterConfigEntity1.setSelected(false);
+ expectLastCall().once();
+
+ expect(clusterConfigEntity2.getType()).andReturn("type1").anyTimes();
+ expect(clusterConfigEntity2.getSelectedTimestamp()).andReturn(321L);
+ clusterConfigEntity2.setSelected(false);
+ expectLastCall().once();
+ clusterConfigEntity2.setSelected(true);
+ expectLastCall().once();
+
+ TypedQuery queryMock = easyMockSupport.createNiceMock(TypedQuery.class);
+ expect(mockEntityManager.createNamedQuery(anyString(), anyObject(Class.class))).andReturn(queryMock).anyTimes();
+ expect(queryMock.setParameter(anyString(), anyString())).andReturn(queryMock).once();
+ expect(queryMock.setParameter(anyString(), anyLong())).andReturn(queryMock).once();
+ expect(queryMock.getResultList()).andReturn(Arrays.asList(clusterConfigEntity1, clusterConfigEntity2)).once();
+ expect(clusterDAO.merge(anyObject(ClusterConfigEntity.class), anyBoolean())).andReturn(null).times(3);
+
+ DatabaseConsistencyCheckHelper.setInjector(mockInjector);
+ DatabaseConsistencyCheckHelper.setConnection(mockConnection);
+
+ easyMockSupport.replayAll();
+
+ DatabaseConsistencyCheckHelper.fixConfigsSelectedMoreThanOnce();
+
+ easyMockSupport.verifyAll();
+ }
}
[2/2] ambari git commit: AMBARI-21935. Auto fix enhancement to remove
more than 1 selected configs (dlysnichenko)
Posted by dm...@apache.org.
AMBARI-21935. Auto fix enhancement to remove more than 1 selected configs (dlysnichenko)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d8e621e5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d8e621e5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d8e621e5
Branch: refs/heads/trunk
Commit: d8e621e5ed292f3fb644f8f93f22695540278e08
Parents: fed32b8
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Fri Sep 15 20:05:59 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Fri Sep 15 20:05:59 2017 +0300
----------------------------------------------------------------------
.../checks/DatabaseConsistencyCheckHelper.java | 128 ++++++++++++++++---
.../ambari/server/orm/dao/ClusterDAO.java | 22 +++-
.../orm/entities/ClusterConfigEntity.java | 3 +
.../DatabaseConsistencyCheckHelperTest.java | 84 ++++++++++++
4 files changed, 221 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/d8e621e5/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
index 054c470..34888f2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
@@ -40,6 +40,7 @@ import java.util.regex.Pattern;
import javax.annotation.Nullable;
import javax.inject.Provider;
import javax.persistence.EntityManager;
+import javax.persistence.Query;
import javax.persistence.TypedQuery;
import org.apache.ambari.server.AmbariException;
@@ -59,6 +60,8 @@ import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
import org.apache.ambari.server.orm.entities.MetainfoEntity;
import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
import org.apache.ambari.server.state.ClientConfigFileDefinition;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.ComponentInfo;
import org.apache.ambari.server.state.ServiceInfo;
import org.apache.ambari.server.state.State;
@@ -92,6 +95,10 @@ public class DatabaseConsistencyCheckHelper {
private static StageDAO stageDAO;
private static DatabaseConsistencyCheckResult checkResult = DatabaseConsistencyCheckResult.DB_CHECK_SUCCESS;
+ public static final String GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY = "select c.cluster_name, cc.type_name from clusterconfig cc " +
+ "join clusters c on cc.cluster_id=c.cluster_id " +
+ "group by c.cluster_name, cc.type_name " +
+ "having sum(cc.selected) > 1";
/**
* @return The result of the DB checks run so far.
@@ -174,6 +181,7 @@ public class DatabaseConsistencyCheckHelper {
if (fixIssues) {
fixHostComponentStatesCountEqualsHostComponentsDesiredStates();
fixClusterConfigsNotMappedToAnyService();
+ fixConfigsSelectedMoreThanOnce();
}
checkSchemaName();
checkMySQLEngine();
@@ -317,7 +325,7 @@ public class DatabaseConsistencyCheckHelper {
warning("Unable to get size for table {}!", tableName);
}
} catch (SQLException ex) {
- error(String.format("Failed to get %s row count: ", tableName), e);
+ warning(String.format("Failed to get %s row count: ", tableName), e);
}
} finally {
if (rs != null) {
@@ -376,7 +384,7 @@ public class DatabaseConsistencyCheckHelper {
}
} catch (SQLException e) {
- error("Exception occurred during check for config selected more than once procedure: ", e);
+ warning("Exception occurred during check for config selected more than once procedure: ", e);
} finally {
if (rs != null) {
try {
@@ -420,12 +428,12 @@ public class DatabaseConsistencyCheckHelper {
}
if (!hostsWithoutStatus.isEmpty()) {
- error("You have host(s) without state (in hoststate table): " + StringUtils.join(hostsWithoutStatus, ","));
+ warning("You have host(s) without state (in hoststate table): " + StringUtils.join(hostsWithoutStatus, ","));
}
}
} catch (SQLException e) {
- error("Exception occurred during check for host without state procedure: ", e);
+ warning("Exception occurred during check for host without state procedure: ", e);
} finally {
if (rs != null) {
try {
@@ -458,7 +466,7 @@ public class DatabaseConsistencyCheckHelper {
}
} catch (SQLException e) {
- error("Exception occurred during topology request tables check: ", e);
+ warning("Exception occurred during topology request tables check: ", e);
} finally {
if (rs != null) {
try {
@@ -523,7 +531,7 @@ public class DatabaseConsistencyCheckHelper {
}
if (hostComponentStateCount != hostComponentDesiredStateCount || hostComponentStateCount != mergedCount) {
- error("Your host component states (hostcomponentstate table) count not equals host component desired states (hostcomponentdesiredstate table) count!");
+ warning("Your host component states (hostcomponentstate table) count not equals host component desired states (hostcomponentdesiredstate table) count!");
}
@@ -535,11 +543,11 @@ public class DatabaseConsistencyCheckHelper {
}
for (Map.Entry<String, String> component : hostComponentStateDuplicates.entrySet()) {
- error("Component {} on host with id {}, has more than one host component state (hostcomponentstate table)!", component.getKey(), component.getValue());
+ warning("Component {} on host with id {}, has more than one host component state (hostcomponentstate table)!", component.getKey(), component.getValue());
}
} catch (SQLException e) {
- error("Exception occurred during check for same count of host component states and host component desired states: ", e);
+ warning("Exception occurred during check for same count of host component states and host component desired states: ", e);
} finally {
if (rs != null) {
try {
@@ -774,11 +782,11 @@ public class DatabaseConsistencyCheckHelper {
tablesInfo.add(rs.getString("TABLE_NAME"));
}
if (!tablesInfo.isEmpty()){
- error("Found tables with engine type that is not InnoDB : {}", tablesInfo);
+ warning("Found tables with engine type that is not InnoDB : {}", tablesInfo);
}
}
} catch (SQLException e) {
- error("Exception occurred during checking MySQL engine to be innodb: ", e);
+ warning("Exception occurred during checking MySQL engine to be innodb: ", e);
} finally {
if (rs != null) {
try {
@@ -791,12 +799,102 @@ public class DatabaseConsistencyCheckHelper {
}
/**
+ * Fix inconsistencies found by {@code checkForConfigsSelectedMoreThanOnce}
+ * by keeping selected only the config with the latest selectedTimestamp
+ */
+ @Transactional
+ static void fixConfigsSelectedMoreThanOnce() {
+ LOG.info("Fix configs selected more than once");
+ ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
+
+ Clusters clusters = injector.getInstance(Clusters.class);
+ Map<String, Cluster> clusterMap = clusters.getClusters();
+
+
+ Multimap<String, String> clusterConfigTypeMap = HashMultimap.create();
+ ResultSet rs = null;
+ Statement statement = null;
+
+ ensureConnection();
+
+ try {
+ statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE);
+ rs = statement.executeQuery(GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY);
+ if (rs != null) {
+ while (rs.next()) {
+ clusterConfigTypeMap.put(rs.getString("cluster_name"), rs.getString("type_name"));
+ }
+ }
+
+ } catch (SQLException e) {
+ warning("Exception occurred during check for config selected more than once procedure: ", e);
+ } finally {
+ if (rs != null) {
+ try {
+ rs.close();
+ } catch (SQLException e) {
+ LOG.error("Exception occurred during result set closing procedure: ", e);
+ }
+ }
+
+ if (statement != null) {
+ try {
+ statement.close();
+ } catch (SQLException e) {
+ LOG.error("Exception occurred during statement closing procedure: ", e);
+ }
+ }
+ }
+
+ for (String clusterName : clusterConfigTypeMap.keySet()) {
+ Cluster cluster = null;
+ try {
+ cluster = clusters.getCluster(clusterName);
+
+ Collection<String> typesWithMultipleSelectedConfigs = clusterConfigTypeMap.get(clusterName);
+
+ for (String type: typesWithMultipleSelectedConfigs) {
+ List<ClusterConfigEntity> enabledConfigsByType = getEnabledConfigsByType(cluster.getClusterId(), type);
+ ClusterConfigEntity latestConfig = enabledConfigsByType.get(0);
+ for (ClusterConfigEntity entity : enabledConfigsByType){
+ entity.setSelected(false);
+ if (latestConfig.getSelectedTimestamp() < entity.getSelectedTimestamp()){
+ latestConfig = entity;
+ }
+ clusterDAO.merge(entity, true);
+ }
+ latestConfig.setSelected(true);
+ clusterDAO.merge(latestConfig, true);
+ }
+ } catch (AmbariException e) {
+ warning("Exception occurred during fix for config selected more than once procedure: ", e);
+ }
+ }
+ }
+
+ /**
+ * Find ClusterConfigs with selected = 1 for the given cluster and config type
+ * @return the currently enabled (selected) ClusterConfigEntity rows of the given type
+ */
+ private static List<ClusterConfigEntity> getEnabledConfigsByType(long clusterId, String type) {
+
+ Provider<EntityManager> entityManagerProvider = injector.getProvider(EntityManager.class);
+ EntityManager entityManager = entityManagerProvider.get();
+
+ Query query = entityManager.createNamedQuery("ClusterConfigEntity.findEnabledConfigByType",ClusterConfigEntity.class);
+ query.setParameter("clusterId", clusterId);
+ query.setParameter("type", type);
+
+ return (List<ClusterConfigEntity>) query.getResultList();
+ }
+
+ /**
* This method checks several potential problems for services:
* 1) Check if we have services in cluster which doesn't have service config id(not available in serviceconfig table).
* 2) Check if service has no mapped configs to it's service config id.
* 3) Check if service has all required configs mapped to it.
* 4) Check if service has config which is not selected(has no actual config version)
- * If any issue was discovered, we are showing error message for user.
+ * If any issue was discovered, we show a warning message to the user.
* */
static void checkServiceConfigs() {
LOG.info("Checking services and their configs");
@@ -875,7 +973,7 @@ public class DatabaseConsistencyCheckHelper {
for (String clName : clusterServiceVersionMap.keySet()) {
Multimap<String, String> serviceVersion = clusterServiceVersionMap.get(clName);
for (String servName : serviceVersion.keySet()) {
- error("In cluster {}, service config mapping is unavailable (in table serviceconfigmapping) for service {} with version(s) {}! ", clName, servName, StringUtils.join(serviceVersion.get(servName), ","));
+ warning("In cluster {}, service config mapping is unavailable (in table serviceconfigmapping) for service {} with version(s) {}! ", clName, servName, StringUtils.join(serviceVersion.get(servName), ","));
}
}
@@ -986,7 +1084,7 @@ public class DatabaseConsistencyCheckHelper {
}
if (!serviceConfigsFromStack.isEmpty()) {
- error("Required config(s): {} is(are) not available for service {} with service config version {} in cluster {}",
+ warning("Required config(s): {} is(are) not available for service {} with service config version {} in cluster {}",
StringUtils.join(serviceConfigsFromStack, ","), serviceName, Integer.toString(serviceVersion), clusterName);
}
}
@@ -1024,11 +1122,11 @@ public class DatabaseConsistencyCheckHelper {
for (String clusterName : clusterServiceConfigType.keySet()) {
Multimap<String, String> serviceConfig = clusterServiceConfigType.get(clusterName);
for (String serviceName : serviceConfig.keySet()) {
- error("You have non selected configs: {} for service {} from cluster {}!", StringUtils.join(serviceConfig.get(serviceName), ","), serviceName, clusterName);
+ warning("You have non selected configs: {} for service {} from cluster {}!", StringUtils.join(serviceConfig.get(serviceName), ","), serviceName, clusterName);
}
}
} catch (SQLException | AmbariException e) {
- error("Exception occurred during complex service check procedure: ", e);
+ warning("Exception occurred during complex service check procedure: ", e);
} finally {
if (rs != null) {
try {
http://git-wip-us.apache.org/repos/asf/ambari/blob/d8e621e5/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
index d0f8d0b..a1b6fbe 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
@@ -356,9 +356,29 @@ public class ClusterDAO {
* the entity to merge (not {@code null}).
* @return the managed entity which was merged (never {@code null}).
*/
+ @Transactional
public ClusterConfigEntity merge(ClusterConfigEntity clusterConfigEntity) {
+ return merge(clusterConfigEntity, false);
+ }
+
+ /**
+ * Merge the specified entity into the current persistence context.
+ *
+ * @param clusterConfigEntity
+ * the entity to merge (not {@code null}).
+ * @param flush
+ * if {@code true} then {@link EntityManager#flush()} will be invoked
+ * immediately after the merge.
+ * @return the managed entity which was merged (never {@code null}).
+ */
+ @Transactional
+ public ClusterConfigEntity merge(ClusterConfigEntity clusterConfigEntity, boolean flush) {
EntityManager entityManager = entityManagerProvider.get();
- return entityManager.merge(clusterConfigEntity);
+ ClusterConfigEntity clusterConfigEntityRes = entityManager.merge(clusterConfigEntity);
+ if(flush) {
+ entityManager.flush();
+ }
+ return clusterConfigEntityRes;
}
@Transactional
http://git-wip-us.apache.org/repos/asf/ambari/blob/d8e621e5/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
index 0c5276c..beecf4e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
@@ -74,6 +74,9 @@ import org.apache.commons.lang.builder.EqualsBuilder;
name = "ClusterConfigEntity.findEnabledConfigByType",
query = "SELECT config FROM ClusterConfigEntity config WHERE config.clusterId = :clusterId AND config.selected = 1 and config.type = :type"),
@NamedQuery(
+ name = "ClusterConfigEntity.findEnabledConfigs",
+ query = "SELECT config FROM ClusterConfigEntity config WHERE config.clusterId = :clusterId AND config.selected = 1"),
+ @NamedQuery(
name = "ClusterConfigEntity.findEnabledConfigsByTypes",
query = "SELECT config FROM ClusterConfigEntity config WHERE config.clusterId = :clusterId AND config.selected = 1 and config.type in :types") })
http://git-wip-us.apache.org/repos/asf/ambari/blob/d8e621e5/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
index ce7b783..3db174c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
@@ -19,10 +19,14 @@ package org.apache.ambari.server.checks;
import static com.google.common.collect.Lists.newArrayList;
+import static org.easymock.EasyMock.anyBoolean;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.anyString;
import static org.easymock.EasyMock.createNiceMock;
import static org.easymock.EasyMock.createStrictMock;
import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
import static org.easymock.EasyMock.replay;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -32,16 +36,21 @@ import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
+import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.configuration.Configuration;
import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
import org.apache.ambari.server.stack.StackManagerFactory;
+import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.ServiceInfo;
import org.apache.ambari.server.state.stack.OsFamily;
@@ -541,4 +550,79 @@ public class DatabaseConsistencyCheckHelperTest {
easyMockSupport.verifyAll();
}
+
+ @Test
+ public void testFixConfigsSelectedMoreThanOnce() throws Exception {
+ EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+ final Connection mockConnection = easyMockSupport.createNiceMock(Connection.class);
+ final ClusterDAO clusterDAO = easyMockSupport.createNiceMock(ClusterDAO.class);
+ final DBAccessor mockDBDbAccessor = easyMockSupport.createNiceMock(DBAccessor.class);
+
+ final EntityManager mockEntityManager = easyMockSupport.createNiceMock(EntityManager.class);
+ final Clusters mockClusters = easyMockSupport.createNiceMock(Clusters.class);
+ final ResultSet mockResultSet = easyMockSupport.createNiceMock(ResultSet.class);
+ final Statement mockStatement = easyMockSupport.createNiceMock(Statement.class);
+
+ final StackManagerFactory mockStackManagerFactory = easyMockSupport.createNiceMock(StackManagerFactory.class);
+ final OsFamily mockOSFamily = easyMockSupport.createNiceMock(OsFamily.class);
+
+ final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(EntityManager.class).toInstance(mockEntityManager);
+ bind(Clusters.class).toInstance(mockClusters);
+ bind(ClusterDAO.class).toInstance(clusterDAO);
+ bind(DBAccessor.class).toInstance(mockDBDbAccessor);
+ bind(StackManagerFactory.class).toInstance(mockStackManagerFactory);
+ bind(OsFamily.class).toInstance(mockOSFamily);
+ }
+ });
+
+
+ expect(mockConnection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE)).andReturn(mockStatement);
+ expect(mockStatement.executeQuery("select c.cluster_name, cc.type_name from clusterconfig cc " +
+ "join clusters c on cc.cluster_id=c.cluster_id " +
+ "group by c.cluster_name, cc.type_name " +
+ "having sum(cc.selected) > 1")).andReturn(mockResultSet);
+ expect(mockResultSet.next()).andReturn(true).once();
+ expect(mockResultSet.getString("cluster_name")).andReturn("123").once();
+ expect(mockResultSet.getString("type_name")).andReturn("type1").once();
+ expect(mockResultSet.next()).andReturn(false).once();
+
+ Cluster clusterMock = easyMockSupport.createNiceMock(Cluster.class);
+ expect(mockClusters.getCluster("123")).andReturn(clusterMock);
+
+ expect(clusterMock.getClusterId()).andReturn(123L).once();
+
+ ClusterConfigEntity clusterConfigEntity1 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
+ ClusterConfigEntity clusterConfigEntity2 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
+ expect(clusterConfigEntity1.getType()).andReturn("type1").anyTimes();
+ expect(clusterConfigEntity1.getSelectedTimestamp()).andReturn(123L);
+ clusterConfigEntity1.setSelected(false);
+ expectLastCall().once();
+
+ expect(clusterConfigEntity2.getType()).andReturn("type1").anyTimes();
+ expect(clusterConfigEntity2.getSelectedTimestamp()).andReturn(321L);
+ clusterConfigEntity2.setSelected(false);
+ expectLastCall().once();
+ clusterConfigEntity2.setSelected(true);
+ expectLastCall().once();
+
+ TypedQuery queryMock = easyMockSupport.createNiceMock(TypedQuery.class);
+ expect(mockEntityManager.createNamedQuery(anyString(), anyObject(Class.class))).andReturn(queryMock).anyTimes();
+ expect(queryMock.setParameter(anyString(), anyString())).andReturn(queryMock).once();
+ expect(queryMock.setParameter(anyString(), anyLong())).andReturn(queryMock).once();
+ expect(queryMock.getResultList()).andReturn(Arrays.asList(clusterConfigEntity1, clusterConfigEntity2)).once();
+ expect(clusterDAO.merge(anyObject(ClusterConfigEntity.class), anyBoolean())).andReturn(null).times(3);
+
+ DatabaseConsistencyCheckHelper.setInjector(mockInjector);
+ DatabaseConsistencyCheckHelper.setConnection(mockConnection);
+
+ easyMockSupport.replayAll();
+
+ DatabaseConsistencyCheckHelper.fixConfigsSelectedMoreThanOnce();
+
+ easyMockSupport.verifyAll();
+ }
}