Posted to commits@ambari.apache.org by mp...@apache.org on 2017/10/11 08:38:42 UTC

[4/7] ambari git commit: AMBARI-22190. After merging trunk to branch-3.0-perf some parts of code are missing. (mpapirkovskyy)

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
index d1de998..c31469e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -21,6 +21,8 @@ import static org.apache.ambari.server.view.ViewContextImpl.CORE_SITE;
 
 import java.sql.SQLException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -29,11 +31,24 @@ import javax.persistence.EntityManager;
 import javax.persistence.Query;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.ArtifactDAO;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.ArtifactEntity;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptorContainer;
+import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
+import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosPrincipalDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -70,6 +85,7 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
 
   public static final String REPO_VERSION_TABLE = "repo_version";
   public static final String REPO_VERSION_ID_COLUMN = "repo_version_id";
+  public static final String REPO_VERSION_RESOLVED_COLUMN = "resolved";
   public static final String REPO_VERSION_HIDDEN_COLUMN = "hidden";
 
   public static final String HOST_COMPONENT_DESIRED_STATE_TABLE = "hostcomponentdesiredstate";
@@ -97,6 +113,7 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
   public static final String FK_UPGRADE_FROM_REPO_ID = "FK_upgrade_from_repo_id";
   public static final String FK_UPGRADE_TO_REPO_ID = "FK_upgrade_to_repo_id";
   public static final String FK_UPGRADE_REPO_VERSION_ID = "FK_upgrade_repo_version_id";
+  public static final String UPGRADE_ITEM_ITEM_TEXT = "item_text";
 
   public static final String SERVICE_COMPONENT_HISTORY_TABLE = "servicecomponent_history";
   public static final String UPGRADE_HISTORY_TABLE = "upgrade_history";
@@ -115,6 +132,9 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
   public static final String HOST_COMPONENT_DESIRED_STATE = "hostcomponentdesiredstate";
   public static final String HOST_COMPONENT_STATE = "hostcomponentstate";
 
+  public static final String AMS_SSL_CLIENT = "ams-ssl-client";
+  public static final String METRIC_TRUSTSTORE_ALIAS = "ssl.client.truststore.alias";
+
   /**
    * Logger.
    */
@@ -123,6 +143,12 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
   public static final String NOT_REQUIRED = "NOT_REQUIRED";
   public static final String CURRENT = "CURRENT";
   public static final String SELECTED = "1";
+  public static final String VIEWURL_TABLE = "viewurl";
+  public static final String PK_VIEWURL = "PK_viewurl";
+  public static final String URL_ID_COLUMN = "url_id";
+  public static final String STALE_POSTGRESS_VIEWURL_PKEY = "viewurl_pkey";
+  public static final String USERS_TABLE = "users";
+  public static final String STALE_POSTGRESS_USERS_LDAP_USER_KEY = "users_ldap_user_key";
 
 
   /**
@@ -168,6 +194,34 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
     createUpgradeHistoryTable();
     updateRepositoryVersionTable();
     renameServiceDeletedColumn();
+    expandUpgradeItemItemTextColumn();
+    addViewUrlPKConstraint();
+    removeStaleConstraints();
+  }
+
+  /**
+   * Updates the {@value #VIEWURL_TABLE} table: drops the stale PostgreSQL
+   * {@value #STALE_POSTGRESS_VIEWURL_PKEY} key and adds the {@value #PK_VIEWURL} constraint.
+   */
+  private void addViewUrlPKConstraint() throws SQLException {
+    dbAccessor.dropPKConstraint(VIEWURL_TABLE, STALE_POSTGRESS_VIEWURL_PKEY);
+    dbAccessor.addPKConstraint(VIEWURL_TABLE, PK_VIEWURL, URL_ID_COLUMN);
+  }
+
+  /**
+   * Removes stale unnamed constraints.
+   */
+  private void removeStaleConstraints() throws SQLException {
+    dbAccessor.dropUniqueConstraint(USERS_TABLE, STALE_POSTGRESS_USERS_LDAP_USER_KEY);
+  }
+
+  /**
+   * Expands the item_text column of the upgrade_item table.
+   */
+  private void expandUpgradeItemItemTextColumn() throws SQLException {
+    dbAccessor.changeColumnType(UPGRADE_ITEM_TABLE, UPGRADE_ITEM_ITEM_TEXT,
+      String.class, char[].class);
   }
 
   private void renameServiceDeletedColumn() throws AmbariException, SQLException {
@@ -360,14 +414,20 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
   }
 
   /**
-   * Updates {@value #REPO_VERSION_TABLE} table. Adds
-   * {@value #REPO_VERSION_HIDDEN_COLUMN} column.
+   * Updates the {@value #REPO_VERSION_TABLE} table. Adds the following columns:
+   * <ul>
+   * <li>{@value #REPO_VERSION_HIDDEN_COLUMN}
+   * <li>{@value #REPO_VERSION_RESOLVED_COLUMN}
+   * </ul>
    *
    * @throws java.sql.SQLException
    */
   private void updateRepositoryVersionTable() throws SQLException {
     dbAccessor.addColumn(REPO_VERSION_TABLE,
         new DBAccessor.DBColumnInfo(REPO_VERSION_HIDDEN_COLUMN, Short.class, null, 0, false));
+
+    dbAccessor.addColumn(REPO_VERSION_TABLE,
+        new DBAccessor.DBColumnInfo(REPO_VERSION_RESOLVED_COLUMN, Short.class, null, 0, false));
   }
 
   /**
@@ -375,7 +435,7 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
    */
   @Override
   protected void executePreDMLUpdates() throws AmbariException, SQLException {
-
+    removeSupersetFromDruid();
   }
 
   /**
@@ -385,8 +445,11 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
   protected void executeDMLUpdates() throws AmbariException, SQLException {
     addNewConfigurationsFromXml();
     setUnmappedForOrphanedConfigs();
-    removeSupersetFromDruid();
     ensureZeppelinProxyUserConfigs();
+    updateKerberosDescriptorArtifacts();
+    updateAmsConfigs();
+    updateHDFSWidgetDefinition();
+    updateExistingRepositoriesToBeResolved();
   }
 
   public int getCurrentVersionID() throws AmbariException, SQLException {
@@ -495,4 +558,270 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
       }
     }
   }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  protected void updateKerberosDescriptorArtifact(ArtifactDAO artifactDAO, ArtifactEntity artifactEntity) throws AmbariException {
+    if (artifactEntity != null) {
+      Map<String, Object> data = artifactEntity.getArtifactData();
+      if (data != null) {
+        final KerberosDescriptor kerberosDescriptor = new KerberosDescriptorFactory().createInstance(data);
+        if (kerberosDescriptor != null) {
+          fixRangerKMSKerberosDescriptor(kerberosDescriptor);
+          fixIdentityReferences(getCluster(artifactEntity), kerberosDescriptor);
+
+          artifactEntity.setArtifactData(kerberosDescriptor.toMap());
+          artifactDAO.merge(artifactEntity);
+        }
+      }
+    }
+  }
+
+  protected void fixRangerKMSKerberosDescriptor(KerberosDescriptor kerberosDescriptor) {
+    KerberosServiceDescriptor rangerKmsServiceDescriptor = kerberosDescriptor.getService("RANGER_KMS");
+    if (rangerKmsServiceDescriptor != null) {
+
+      KerberosIdentityDescriptor rangerKmsServiceIdentity = rangerKmsServiceDescriptor.getIdentity("/smokeuser");
+      if (rangerKmsServiceIdentity != null) {
+        rangerKmsServiceDescriptor.removeIdentity("/smokeuser");
+      }
+      KerberosComponentDescriptor rangerKmscomponentDescriptor = rangerKmsServiceDescriptor.getComponent("RANGER_KMS_SERVER");
+      if (rangerKmscomponentDescriptor != null) {
+        KerberosIdentityDescriptor rangerKmsComponentIdentity = rangerKmscomponentDescriptor.getIdentity("/smokeuser");
+        if (rangerKmsComponentIdentity != null) {
+          rangerKmscomponentDescriptor.removeIdentity("/smokeuser");
+        }
+      }
+    }
+  }
+
+  protected void updateAmsConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
+      if (clusterMap != null && !clusterMap.isEmpty()) {
+        for (final Cluster cluster : clusterMap.values()) {
+
+          Config amsSslClient = cluster.getDesiredConfigByType(AMS_SSL_CLIENT);
+          if (amsSslClient != null) {
+            Map<String, String> amsSslClientProperties = amsSslClient.getProperties();
+
+            if (amsSslClientProperties.containsKey(METRIC_TRUSTSTORE_ALIAS)) {
+              LOG.info("Removing " + METRIC_TRUSTSTORE_ALIAS + " from " + AMS_SSL_CLIENT);
+              removeConfigurationPropertiesFromCluster(cluster, AMS_SSL_CLIENT, Collections.singleton(METRIC_TRUSTSTORE_ALIAS));
+            }
+
+          }
+        }
+      }
+    }
+  }
+
+  protected void updateHDFSWidgetDefinition() throws AmbariException {
+    LOG.info("Updating HDFS widget definition.");
+
+    Map<String, List<String>> widgetMap = new HashMap<>();
+    Map<String, String> sectionLayoutMap = new HashMap<>();
+
+    List<String> hdfsHeatmapWidgets = new ArrayList<>(Arrays.asList("HDFS Bytes Read", "HDFS Bytes Written",
+      "DataNode Process Disk I/O Utilization", "DataNode Process Network I/O Utilization"));
+    widgetMap.put("HDFS_HEATMAPS", hdfsHeatmapWidgets);
+    sectionLayoutMap.put("HDFS_HEATMAPS", "default_hdfs_heatmap");
+
+    updateWidgetDefinitionsForService("HDFS", widgetMap, sectionLayoutMap);
+  }
+
+  /**
+   * Retrieves the relevant {@link Cluster} given information from the supplied {@link ArtifactEntity}.
+   * <p>
+   * The cluster id value is taken from the entity's foreign key value and then used to obtain the cluster object.
+   *
+   * @param artifactEntity an {@link ArtifactEntity}
+   * @return a {@link Cluster}
+   */
+  private Cluster getCluster(ArtifactEntity artifactEntity) {
+    if (artifactEntity != null) {
+      Map<String, String> keys = artifactEntity.getForeignKeys();
+      if (keys != null) {
+        String clusterId = keys.get("cluster");
+        if (StringUtils.isNumeric(clusterId)) {
+          Clusters clusters = injector.getInstance(Clusters.class);
+          try {
+            return clusters.getCluster(Long.valueOf(clusterId));
+          } catch (AmbariException e) {
+            LOG.error(String.format("Failed to obtain cluster using cluster id %s - %s", clusterId, e.getMessage()), e);
+          }
+        } else {
+          LOG.error(String.format("Failed to obtain cluster id from artifact entity with foreign keys: %s", keys));
+        }
+      }
+    }
+
+    return null;
+  }
+
+  /**
+   * Recursively traverses the Kerberos descriptor to find and fix the identity references.
+   * <p>
+   * Each found identity descriptor that indicates it is a reference by having a <code>name</code>
+   * value that starts with a "/" or a "./" is fixed by clearing the <code>principal name</code> value,
+   * setting the <code>reference</code> value to the <code>name</code> value and changing the
+   * <code>name</code> value to a name with the following pattern:
+   * <code>SERVICE_COMPONENT_IDENTITY</code>
+   * <p>
+   * For example, if the identity is for the "SERVICE1" service and is a reference to "HDFS/NAMENODE/hdfs",
+   * then the name is set to "<code>service1_hdfs</code>".
+   * <p>
+   * For example, if the identity is for the "COMPONENT21" component of the "SERVICE2" service and is a reference to "HDFS/NAMENODE/hdfs",
+   * then the name is set to "<code>service2_component21_hdfs</code>".
+   * <p>
+   * Once the identity descriptor properties of the identity are fixed, the relevant configuration
+   * value is fixed to match the value of the referenced identity. This may lead to a new version
+   * of the relevant configuration type.
+   *
+   * @param cluster   the cluster
+   * @param container the current Kerberos descriptor container
+   * @throws AmbariException if an error occurs
+   */
+  private void fixIdentityReferences(Cluster cluster, AbstractKerberosDescriptorContainer container)
+      throws AmbariException {
+    List<KerberosIdentityDescriptor> identities = container.getIdentities();
+    if (identities != null) {
+      for (KerberosIdentityDescriptor identity : identities) {
+        String name = identity.getName();
+
+        if (!StringUtils.isEmpty(name) && (name.startsWith("/") || name.startsWith("./"))) {
+          String[] parts = name.split("/");
+          String newName = buildName(identity.getParent(), parts[parts.length - 1]);
+
+          identity.setName(newName);
+          identity.setReference(name);
+        }
+
+        String identityReference = identity.getReference();
+        if (!StringUtils.isEmpty(identityReference)) {
+          // If this identity references another identity:
+          //  * The principal name needs to be the same as the referenced identity
+          //    - ensure that no principal name is being set for this identity
+          //  * Any configuration set to contain the reference principal name needs to be fixed to
+          //    be the correct principal name
+          KerberosPrincipalDescriptor principal = identity.getPrincipalDescriptor();
+          if (principal != null) {
+            // Fix the value
+            principal.setValue(null);
+
+            // Fix the relative configuration
+            if (!StringUtils.isEmpty(principal.getConfiguration())) {
+              String referencedPrincipalName = getConfiguredPrincipalNameFromReference(cluster, container, identityReference);
+
+              if (!StringUtils.isEmpty(referencedPrincipalName)) {
+                String[] parts = principal.getConfiguration().split("/");
+                if (parts.length == 2) {
+                  String type = parts[0];
+                  String property = parts[1];
+
+                  updateConfigurationPropertiesForCluster(cluster,
+                      type,
+                      Collections.singletonMap(property, referencedPrincipalName),
+                      true,
+                      false);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+
+    if (container instanceof KerberosDescriptor) {
+      Map<String, KerberosServiceDescriptor> services = ((KerberosDescriptor) container).getServices();
+      if (services != null) {
+        for (KerberosServiceDescriptor serviceDescriptor : services.values()) {
+          fixIdentityReferences(cluster, serviceDescriptor);
+        }
+      }
+    } else if (container instanceof KerberosServiceDescriptor) {
+      Map<String, KerberosComponentDescriptor> components = ((KerberosServiceDescriptor) container).getComponents();
+      if (components != null) {
+        for (KerberosComponentDescriptor componentDescriptor : components.values()) {
+          fixIdentityReferences(cluster, componentDescriptor);
+        }
+      }
+    }
+  }
+
+  /**
+   * Finds the value of the configuration found for the principal in the referenced identity
+   * descriptor.
+   *
+   * @param cluster           the cluster
+   * @param container         the container of the current {@link KerberosIdentityDescriptor}, ideally the identity's parent descriptor
+   * @param identityReference the path to the referenced identity
+   * @return the value of the configuration specified in the referenced identity's principal descriptor
+   * @throws AmbariException if an error occurs
+   */
+  private String getConfiguredPrincipalNameFromReference(Cluster cluster,
+                                                         AbstractKerberosDescriptorContainer container,
+                                                         String identityReference)
+      throws AmbariException {
+    KerberosIdentityDescriptor identityDescriptor = container.getReferencedIdentityDescriptor(identityReference);
+
+    if (identityDescriptor != null) {
+      KerberosPrincipalDescriptor principal = identityDescriptor.getPrincipalDescriptor();
+      if ((principal != null) && (!StringUtils.isEmpty(principal.getConfiguration()))) {
+        String[] parts = principal.getConfiguration().split("/");
+        if (parts.length == 2) {
+          String type = parts[0];
+          String property = parts[1];
+
+          Config config = cluster.getDesiredConfigByType(type);
+
+          if (config != null) {
+            return config.getProperties().get(property);
+          }
+        }
+      }
+    }
+
+    return null;
+  }
+
+  /**
+   * Builds the name of an identity based on the identity's container and the referenced identity's name.
+   * <p>
+   * The calculated name will be in the following format and converted to all lowercase characters:
+   * <code>SERVICE_COMPONENT_IDENTITY</code>
+   *
+   * @param container    the container of the current {@link KerberosIdentityDescriptor}, ideally the identity's parent descriptor
+   * @param identityName the referenced identity's name
+   * @return a name
+   */
+  private String buildName(AbstractKerberosDescriptor container, String identityName) {
+    if (container instanceof KerberosServiceDescriptor) {
+      return container.getName().toLowerCase() + "_" + identityName;
+    } else if (container instanceof KerberosComponentDescriptor) {
+      return container.getParent().getName().toLowerCase() + "_" + container.getName().toLowerCase() + "_" + identityName;
+    } else {
+      return identityName;
+    }
+  }
+
+  /**
+   * Sets all existing repository versions to be resolved (we have to assume
+   * they are good, since the cluster has already been running against them).
+   *
+   * @throws AmbariException
+   */
+  protected void updateExistingRepositoriesToBeResolved() throws AmbariException {
+    RepositoryVersionDAO repositoryVersionDAO = injector.getInstance(RepositoryVersionDAO.class);
+    List<RepositoryVersionEntity> repositoryVersions = repositoryVersionDAO.findAll();
+    for (RepositoryVersionEntity repositoryVersion : repositoryVersions) {
+      repositoryVersion.setResolved(true);
+      repositoryVersionDAO.merge(repositoryVersion);
+    }
+  }
 }
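
The renaming convention that fixIdentityReferences() and buildName() implement can be condensed for readers of this patch. The sketch below mirrors the javadoc examples above (a reference to "HDFS/NAMENODE/hdfs" held by the "COMPONENT21" component of "SERVICE2" becomes "service2_component21_hdfs"); the String-based signature is a simplification for illustration only, not the actual Ambari API.

    // Sketch of the reference-to-name convention described in the javadoc above.
    // serviceName/componentName identify the descriptor holding the reference;
    // componentName is null for service-level identities.
    static String buildReferenceName(String serviceName, String componentName, String reference) {
      String[] parts = reference.split("/");          // e.g. "/HDFS/NAMENODE/hdfs"
      String identityName = parts[parts.length - 1];  // -> "hdfs"
      if (componentName == null) {
        return serviceName.toLowerCase() + "_" + identityName;                  // "service1_hdfs"
      }
      return serviceName.toLowerCase() + "_" + componentName.toLowerCase()
          + "_" + identityName;                                                 // "service2_component21_hdfs"
    }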

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/main/java/org/apache/ambari/server/utils/ManagedThreadPoolExecutor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/ManagedThreadPoolExecutor.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/ManagedThreadPoolExecutor.java
new file mode 100644
index 0000000..3979c0e
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/ManagedThreadPoolExecutor.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.utils;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * A {@link ThreadPoolExecutor} extension that starts in the stopped state and can be started and stopped on demand.
+ */
+public class ManagedThreadPoolExecutor extends ThreadPoolExecutor {
+
+  private volatile boolean isStopped;
+  private final ReentrantLock pauseLock = new ReentrantLock();
+  private final Condition unpaused = pauseLock.newCondition();
+
+  public ManagedThreadPoolExecutor(int corePoolSize, int maximumPoolSize,
+                            long keepAliveTime, TimeUnit unit,
+                            BlockingQueue<Runnable> workQueue) {
+    super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
+            Executors.defaultThreadFactory());
+    isStopped = true;
+  }
+
+  protected void beforeExecute(Thread t, Runnable r) {
+    super.beforeExecute(t, r);
+    pauseLock.lock();
+    try {
+      while (isStopped) {
+        unpaused.await();
+      }
+    } catch (InterruptedException ie) {
+      t.interrupt();
+    } finally {
+      pauseLock.unlock();
+    }
+  }
+
+  public void start() {
+    pauseLock.lock();
+    try {
+      isStopped = false;
+      unpaused.signalAll();
+    } finally {
+      pauseLock.unlock();
+    }
+  }
+
+  public void stop() {
+    pauseLock.lock();
+    try {
+      isStopped = true;
+    } finally {
+      pauseLock.unlock();
+    }
+  }
+
+  public boolean isRunning() {
+    return !isStopped;
+  }
+
+}
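
Since ManagedThreadPoolExecutor is new in this commit, a minimal usage sketch may help; the pool sizes, queue choice, and task are arbitrary assumptions. Tasks submitted before start() are accepted, but every worker blocks in beforeExecute() until the pool is started.

    // Minimal usage sketch (sizes and queue are arbitrary):
    ManagedThreadPoolExecutor executor = new ManagedThreadPoolExecutor(
        2, 2, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());

    executor.execute(() -> System.out.println("runs only after start()"));
    // executor.isRunning() == false here; the worker waits on the condition.

    executor.start();    // signals the condition; queued tasks begin executing
    executor.stop();     // tasks that have not yet passed beforeExecute() block again
    executor.shutdown();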

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
index 8f0cb67..7c004b3 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-AzureDB-CREATE.sql
@@ -222,6 +222,7 @@ CREATE TABLE repo_version (
   repositories VARCHAR(MAX) NOT NULL,
   repo_type VARCHAR(255) DEFAULT 'STANDARD' NOT NULL,
   hidden SMALLINT NOT NULL DEFAULT 0,
+  resolved BIT NOT NULL DEFAULT 0,
   version_url VARCHAR(1024),
   version_xml VARCHAR(MAX),
   version_xsd VARCHAR(512),
@@ -1169,7 +1170,7 @@ CREATE TABLE upgrade_item (
   state VARCHAR(255) DEFAULT 'NONE' NOT NULL,
   hosts TEXT,
   tasks TEXT,
-  item_text VARCHAR(1024),
+  item_text TEXT,
   CONSTRAINT PK_upgrade_item PRIMARY KEY CLUSTERED (upgrade_item_id),
   FOREIGN KEY (upgrade_group_id) REFERENCES upgrade_group(upgrade_group_id)
 )

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
index 5b03df5..28a7624 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
@@ -155,6 +155,7 @@ CREATE TABLE repo_version (
   repositories VARCHAR(3000) NOT NULL,
   repo_type VARCHAR(255) DEFAULT 'STANDARD' NOT NULL,
   hidden SMALLINT NOT NULL DEFAULT 0,
+  resolved SMALLINT NOT NULL DEFAULT 0,
   version_url VARCHAR(1024),
   version_xml CLOB,
   version_xsd VARCHAR(512),
@@ -844,7 +845,7 @@ CREATE TABLE upgrade_item (
   state VARCHAR(255) DEFAULT 'NONE' NOT NULL,
   hosts VARCHAR(3000),
   tasks VARCHAR(3000),
-  item_text VARCHAR(1024),
+  item_text VARCHAR(3000),
   CONSTRAINT PK_upgrade_item PRIMARY KEY (upgrade_item_id),
   FOREIGN KEY (upgrade_group_id) REFERENCES upgrade_group(upgrade_group_id)
 );

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
index 4d49dca..e7774e9 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
@@ -156,6 +156,7 @@ CREATE TABLE repo_version (
   repositories CLOB NOT NULL,
   repo_type VARCHAR2(255) DEFAULT 'STANDARD' NOT NULL,
   hidden NUMBER(1) DEFAULT 0 NOT NULL,
+  resolved NUMBER(1) DEFAULT 0 NOT NULL,
   version_url VARCHAR(1024),
   version_xml CLOB,
   version_xsd VARCHAR(512),
@@ -840,7 +841,7 @@ CREATE TABLE upgrade_item (
   state VARCHAR2(255) DEFAULT 'NONE' NOT NULL,
   hosts CLOB,
   tasks CLOB,
-  item_text VARCHAR2(1024),
+  item_text CLOB,
   CONSTRAINT PK_upgrade_item PRIMARY KEY (upgrade_item_id),
   FOREIGN KEY (upgrade_group_id) REFERENCES upgrade_group(upgrade_group_id)
 );

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
index 4d084e8..6e7c172 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
@@ -159,6 +159,7 @@ CREATE TABLE repo_version (
   version_xml TEXT,
   version_xsd VARCHAR(512),
   parent_id BIGINT,
+  resolved SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT PK_repo_version PRIMARY KEY (repo_version_id),
   CONSTRAINT FK_repoversion_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
   CONSTRAINT UQ_repo_version_display_name UNIQUE (display_name),
@@ -842,7 +843,7 @@ CREATE TABLE upgrade_item (
   state VARCHAR(255) DEFAULT 'NONE' NOT NULL,
   hosts TEXT,
   tasks TEXT,
-  item_text VARCHAR(1024),
+  item_text TEXT,
   CONSTRAINT PK_upgrade_item PRIMARY KEY (upgrade_item_id),
   FOREIGN KEY (upgrade_group_id) REFERENCES upgrade_group(upgrade_group_id)
 );

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
index a19ca73..a64856d 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
@@ -154,6 +154,7 @@ CREATE TABLE repo_version (
   repositories TEXT NOT NULL,
   repo_type VARCHAR(255) DEFAULT 'STANDARD' NOT NULL,
   hidden SMALLINT NOT NULL DEFAULT 0,
+  resolved BIT NOT NULL DEFAULT 0,
   version_url VARCHAR(1024),
   version_xml TEXT,
   version_xsd VARCHAR(512),
@@ -838,7 +839,7 @@ CREATE TABLE upgrade_item (
   state VARCHAR(255) DEFAULT 'NONE' NOT NULL,
   hosts TEXT,
   tasks TEXT,
-  item_text VARCHAR(1024),
+  item_text TEXT,
   CONSTRAINT PK_upgrade_item PRIMARY KEY (upgrade_item_id),
   FOREIGN KEY (upgrade_group_id) REFERENCES upgrade_group(upgrade_group_id)
 );

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
index 96fd7fc..c0f36c0 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
@@ -168,6 +168,7 @@ CREATE TABLE repo_version (
   repositories VARCHAR(MAX) NOT NULL,
   repo_type VARCHAR(255) DEFAULT 'STANDARD' NOT NULL,
   hidden SMALLINT NOT NULL DEFAULT 0,
+  resolved BIT NOT NULL DEFAULT 0,
   version_url VARCHAR(1024),
   version_xml VARCHAR(MAX),
   version_xsd VARCHAR(512),
@@ -859,7 +860,7 @@ CREATE TABLE upgrade_item (
   state VARCHAR(255) DEFAULT 'NONE' NOT NULL,
   hosts TEXT,
   tasks TEXT,
-  item_text VARCHAR(1024),
+  item_text TEXT,
   CONSTRAINT PK_upgrade_item PRIMARY KEY CLUSTERED (upgrade_item_id),
   FOREIGN KEY (upgrade_group_id) REFERENCES upgrade_group(upgrade_group_id)
 );
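
For clusters upgrading in place rather than installing fresh, these same two repo_version/upgrade_item schema changes are applied by UpgradeCatalog260 earlier in this commit; condensed from that file:

    // In-place-upgrade counterparts of the DDL changes above (see UpgradeCatalog260):
    dbAccessor.addColumn(REPO_VERSION_TABLE,
        new DBAccessor.DBColumnInfo(REPO_VERSION_RESOLVED_COLUMN, Short.class, null, 0, false));
    dbAccessor.changeColumnType(UPGRADE_ITEM_TABLE, UPGRADE_ITEM_ITEM_TEXT,
        String.class, char[].class);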

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
index c449aae..94799cc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
@@ -132,7 +132,7 @@ public class TestActionDBAccessorImpl {
   @Test
   public void testActionResponse() throws AmbariException {
     String hostname = "host1";
-    populateActionDB(db, hostname, requestId, stageId);
+    populateActionDB(db, hostname, requestId, stageId, false);
     Stage stage = db.getAllStages(requestId).get(0);
     Assert.assertEquals(stageId, stage.getStageId());
     stage.setHostRoleStatus(hostname, "HBASE_MASTER", HostRoleStatus.QUEUED);
@@ -160,7 +160,7 @@ public class TestActionDBAccessorImpl {
   @Test
   public void testCancelCommandReport() throws AmbariException {
     String hostname = "host1";
-    populateActionDB(db, hostname, requestId, stageId);
+    populateActionDB(db, hostname, requestId, stageId, false);
     Stage stage = db.getAllStages(requestId).get(0);
     Assert.assertEquals(stageId, stage.getStageId());
     stage.setHostRoleStatus(hostname, "HBASE_MASTER", HostRoleStatus.ABORTED);
@@ -191,8 +191,8 @@ public class TestActionDBAccessorImpl {
   @Test
   public void testGetStagesInProgress() throws AmbariException {
     List<Stage> stages = new ArrayList<>();
-    stages.add(createStubStage(hostName, requestId, stageId));
-    stages.add(createStubStage(hostName, requestId, stageId + 1));
+    stages.add(createStubStage(hostName, requestId, stageId, false));
+    stages.add(createStubStage(hostName, requestId, stageId + 1, false));
     Request request = new Request(stages, "", clusters);
     db.persistActions(request);
     assertEquals(2, stages.size());
@@ -200,8 +200,8 @@ public class TestActionDBAccessorImpl {
 
   @Test
   public void testGetStagesInProgressWithFailures() throws AmbariException {
-    populateActionDB(db, hostName, requestId, stageId);
-    populateActionDB(db, hostName, requestId + 1, stageId);
+    populateActionDB(db, hostName, requestId, stageId, false);
+    populateActionDB(db, hostName, requestId + 1, stageId, false);
     List<Stage> stages = db.getFirstStageInProgressPerRequest();
     assertEquals(2, stages.size());
 
@@ -289,7 +289,7 @@ public class TestActionDBAccessorImpl {
 
   @Test
   public void testPersistActions() throws AmbariException {
-    populateActionDB(db, hostName, requestId, stageId);
+    populateActionDB(db, hostName, requestId, stageId, false);
     for (Stage stage : db.getAllStages(requestId)) {
       log.info("taskId={}" + stage.getExecutionCommands(hostName).get(0).
           getExecutionCommand().getTaskId());
@@ -302,7 +302,7 @@ public class TestActionDBAccessorImpl {
 
   @Test
   public void testHostRoleScheduled() throws InterruptedException, AmbariException {
-    populateActionDB(db, hostName, requestId, stageId);
+    populateActionDB(db, hostName, requestId, stageId, false);
     Stage stage = db.getStage(StageUtils.getActionId(requestId, stageId));
     assertEquals(HostRoleStatus.PENDING, stage.getHostRoleStatus(hostName, Role.HBASE_MASTER.toString()));
     List<HostRoleCommandEntity> entities=
@@ -421,7 +421,7 @@ public class TestActionDBAccessorImpl {
 
   @Test
   public void testUpdateHostRole() throws Exception {
-    populateActionDB(db, hostName, requestId, stageId);
+    populateActionDB(db, hostName, requestId, stageId, false);
     StringBuilder sb = new StringBuilder();
     for (int i = 0; i < 50000; i++) {
       sb.append("1234567890");
@@ -452,13 +452,36 @@ public class TestActionDBAccessorImpl {
   }
 
   @Test
+  public void testUpdateHostRoleTimeoutRetry() throws Exception {
+    populateActionDB(db, hostName, requestId, stageId, true);
+
+    CommandReport commandReport = new CommandReport();
+    commandReport.setStatus(HostRoleStatus.TIMEDOUT.toString());
+    commandReport.setStdOut("");
+    commandReport.setStdErr("");
+    commandReport.setStructuredOut("");
+    commandReport.setExitCode(123);
+    db.updateHostRoleState(hostName, requestId, stageId, Role.HBASE_MASTER.toString(), commandReport);
+
+    List<HostRoleCommandEntity> commandEntities =
+      hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER.toString());
+
+    HostRoleCommandEntity commandEntity = commandEntities.get(0);
+    HostRoleCommand command = db.getTask(commandEntity.getTaskId());
+    assertNotNull(command);
+    assertEquals(HostRoleStatus.HOLDING_TIMEDOUT, command.getStatus());
+  }
+
+  @Test
   public void testGetRequestsByStatus() throws AmbariException {
     List<Long> requestIds = new ArrayList<>();
     requestIds.add(requestId + 1);
     requestIds.add(requestId);
-    populateActionDB(db, hostName, requestId, stageId);
+    populateActionDB(db, hostName, requestId, stageId, false);
     clusters.addHost("host2");
-    populateActionDB(db, hostName, requestId + 1, stageId);
+    populateActionDB(db, hostName, requestId + 1, stageId, false);
     List<Long> requestIdsResult =
       db.getRequestsByStatus(null, BaseRequest.DEFAULT_PAGE_SIZE, false);
 
@@ -508,7 +531,7 @@ public class TestActionDBAccessorImpl {
     }
 
     for (Long id : ids) {
-      populateActionDB(db, hostName, id, stageId);
+      populateActionDB(db, hostName, id, stageId, false);
     }
 
     List<Long> expected = null;
@@ -617,7 +640,7 @@ public class TestActionDBAccessorImpl {
   @Test
   public void testEntitiesCreatedWithIDs() throws Exception {
     List<Stage> stages = new ArrayList<>();
-    Stage stage = createStubStage(hostName, requestId, stageId);
+    Stage stage = createStubStage(hostName, requestId, stageId, false);
 
     stages.add(stage);
 
@@ -707,8 +730,8 @@ public class TestActionDBAccessorImpl {
   }
 
   private void populateActionDB(ActionDBAccessor db, String hostname,
-      long requestId, long stageId) throws AmbariException {
-    Stage s = createStubStage(hostname, requestId, stageId);
+      long requestId, long stageId, boolean retryAllowed) throws AmbariException {
+    Stage s = createStubStage(hostname, requestId, stageId, retryAllowed);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
     Request request = new Request(stages, "", clusters);
@@ -721,7 +744,7 @@ public class TestActionDBAccessorImpl {
 
     List<Stage> stages = new ArrayList<>();
     for (int i = 0; i < numberOfStages; i++) {
-      Stage stage = createStubStage(hostname, requestId, stageId + i);
+      Stage stage = createStubStage(hostname, requestId, stageId + i, false);
       stages.add(stage);
     }
 
@@ -732,7 +755,7 @@ public class TestActionDBAccessorImpl {
   private void populateActionDBWithCompletedRequest(ActionDBAccessor db, String hostname,
       long requestId, long stageId) throws AmbariException {
 
-    Stage s = createStubStage(hostname, requestId, stageId);
+    Stage s = createStubStage(hostname, requestId, stageId, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
     Request request = new Request(stages, "", clusters);
@@ -745,7 +768,7 @@ public class TestActionDBAccessorImpl {
   private void populateActionDBWithPartiallyCompletedRequest(ActionDBAccessor db, String hostname,
       long requestId, long stageId) throws AmbariException {
 
-    Stage s = createStubStage(hostname, requestId, stageId);
+    Stage s = createStubStage(hostname, requestId, stageId, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
 
@@ -756,14 +779,14 @@ public class TestActionDBAccessorImpl {
     db.persistActions(request);
   }
 
-  private Stage createStubStage(String hostname, long requestId, long stageId) {
+  private Stage createStubStage(String hostname, long requestId, long stageId, boolean retryAllowed) {
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action db accessor test",
       "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addHostRoleExecutionCommand(hostname, Role.HBASE_MASTER,
         RoleCommand.START,
         new ServiceComponentHostStartEvent(Role.HBASE_MASTER.toString(),
-            hostname, System.currentTimeMillis()), "cluster1", "HBASE", false, false);
+            hostname, System.currentTimeMillis()), "cluster1", "HBASE", retryAllowed, false);
     s.addHostRoleExecutionCommand(
         hostname,
         Role.HBASE_REGIONSERVER,
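
The new testUpdateHostRoleTimeoutRetry above pins down one behavior: when a command was created with retryAllowed = true, a TIMEDOUT command report is persisted as HOLDING_TIMEDOUT so an operator can retry or skip the task. The accessor change itself is not part of this diff, so the following is only an illustrative sketch of that translation, with names assumed, not the actual ActionDBAccessorImpl code.

    // Illustrative sketch only; the real logic lives in ActionDBAccessorImpl.
    HostRoleStatus status = HostRoleStatus.valueOf(report.getStatus());
    if (status == HostRoleStatus.TIMEDOUT && commandEntity.isRetryAllowed()) {
      status = HostRoleStatus.HOLDING_TIMEDOUT;  // hold for operator retry/skip
    }
    commandEntity.setStatus(status);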

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index 92b8429..71da8a9 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@ -30,6 +30,7 @@ import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HDFS;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HDFS_CLIENT;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.NAMENODE;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.SECONDARY_NAMENODE;
+import static org.apache.ambari.server.controller.KerberosHelperImpl.SET_KEYTAB;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.reset;
@@ -1474,7 +1475,7 @@ public class TestHeartbeatHandler {
     ExecutionCommand executionCommand = new ExecutionCommand();
 
     Map<String, String> hlp = new HashMap<>();
-    hlp.put("custom_command", "SET_KEYTAB");
+    hlp.put("custom_command", SET_KEYTAB);
     executionCommand.setHostLevelParams(hlp);
 
     Map<String, String> commandparams = new HashMap<>();
@@ -1547,7 +1548,7 @@ public class TestHeartbeatHandler {
     kerberosIdentityDataFileWriter.writeRecord("c6403.ambari.apache.org", "HDFS", "DATANODE",
         "dn/_HOST@_REALM", "service",
         "/etc/security/keytabs/dn.service.keytab",
-        "hdfs", "r", "hadoop", "", "false");
+        "hdfs", "r", "hadoop", "", "false", "false");
 
     kerberosIdentityDataFileWriter.close();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
index b72dff2..3bd5fac 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
@@ -17,6 +17,8 @@
  */
 package org.apache.ambari.server.agent;
 
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -35,6 +37,7 @@ import org.apache.ambari.server.H2DatabaseCleaner;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.actionmanager.ActionManager;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
@@ -55,6 +58,8 @@ import org.apache.ambari.server.state.svccomphost.ServiceComponentHostDisableEve
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpSucceededEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartedEvent;
+import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.ambari.server.utils.StageUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -86,6 +91,8 @@ public class TestHeartbeatMonitor {
     injector.getInstance(GuiceJpaInitializer.class);
     helper = injector.getInstance(OrmTestHelper.class);
     ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
+    StageUtils.setTopologyManager(injector.getInstance(TopologyManager.class));
+    StageUtils.setConfiguration(injector.getInstance(Configuration.class));
   }
 
   @After
@@ -197,6 +204,8 @@ public class TestHeartbeatMonitor {
     hb.setResponseId(12);
     handler.handleHeartBeat(hb);
 
+    hm.getAgentRequests().setExecutionDetailsRequest(hostname1, "DATANODE", Boolean.TRUE.toString());
+
     List<StatusCommand> cmds = hm.generateStatusCommands(hostname1);
     assertTrue("HeartbeatMonitor should generate StatusCommands for host1", cmds.size() == 3);
     assertEquals("HDFS", cmds.get(0).getServiceName());
@@ -205,10 +214,19 @@ public class TestHeartbeatMonitor {
     boolean  containsSECONDARY_NAMENODEStatus = false;
 
     for (StatusCommand cmd : cmds) {
-      containsDATANODEStatus |= cmd.getComponentName().equals("DATANODE");
+      boolean isDataNode = cmd.getComponentName().equals("DATANODE");
+      containsDATANODEStatus |= isDataNode;
       containsNAMENODEStatus |= cmd.getComponentName().equals("NAMENODE");
       containsSECONDARY_NAMENODEStatus |= cmd.getComponentName().equals("SECONDARY_NAMENODE");
       assertTrue(cmd.getConfigurations().size() > 0);
+
+      ExecutionCommand execCmd = cmd.getExecutionCommand();
+      assertEquals(isDataNode, execCmd != null);
+      if (execCmd != null) {
+        Map<String, String> commandParams = execCmd.getCommandParams();
+        assertTrue(SERVICE_PACKAGE_FOLDER + " should be included", commandParams.containsKey(SERVICE_PACKAGE_FOLDER));
+        assertTrue(HOOKS_FOLDER + " should be included", commandParams.containsKey(HOOKS_FOLDER));
+      }
     }
 
     assertEquals(true, containsDATANODEStatus);

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
index ce7b783..3db174c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
@@ -19,10 +19,14 @@ package org.apache.ambari.server.checks;
 
 
 import static com.google.common.collect.Lists.newArrayList;
+import static org.easymock.EasyMock.anyBoolean;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.createStrictMock;
 import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.replay;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -32,16 +36,21 @@ import java.sql.DatabaseMetaData;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
 
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.stack.StackManagerFactory;
+import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.stack.OsFamily;
@@ -541,4 +550,79 @@ public class DatabaseConsistencyCheckHelperTest {
 
     easyMockSupport.verifyAll();
   }
+
+  @Test
+  public void testFixConfigsSelectedMoreThanOnce() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    final Connection mockConnection = easyMockSupport.createNiceMock(Connection.class);
+    final ClusterDAO clusterDAO = easyMockSupport.createNiceMock(ClusterDAO.class);
+    final DBAccessor mockDBDbAccessor = easyMockSupport.createNiceMock(DBAccessor.class);
+
+    final EntityManager mockEntityManager = easyMockSupport.createNiceMock(EntityManager.class);
+    final Clusters mockClusters = easyMockSupport.createNiceMock(Clusters.class);
+    final ResultSet mockResultSet = easyMockSupport.createNiceMock(ResultSet.class);
+    final Statement mockStatement = easyMockSupport.createNiceMock(Statement.class);
+
+    final StackManagerFactory mockStackManagerFactory = easyMockSupport.createNiceMock(StackManagerFactory.class);
+    final OsFamily mockOSFamily = easyMockSupport.createNiceMock(OsFamily.class);
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(EntityManager.class).toInstance(mockEntityManager);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(ClusterDAO.class).toInstance(clusterDAO);
+        bind(DBAccessor.class).toInstance(mockDBDbAccessor);
+        bind(StackManagerFactory.class).toInstance(mockStackManagerFactory);
+        bind(OsFamily.class).toInstance(mockOSFamily);
+      }
+    });
+
+    expect(mockConnection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE)).andReturn(mockStatement);
+    expect(mockStatement.executeQuery("select c.cluster_name, cc.type_name from clusterconfig cc " +
+        "join clusters c on cc.cluster_id=c.cluster_id " +
+        "group by c.cluster_name, cc.type_name " +
+        "having sum(cc.selected) > 1")).andReturn(mockResultSet);
+    expect(mockResultSet.next()).andReturn(true).once();
+    expect(mockResultSet.getString("cluster_name")).andReturn("123").once();
+    expect(mockResultSet.getString("type_name")).andReturn("type1").once();
+    expect(mockResultSet.next()).andReturn(false).once();
+
+    Cluster clusterMock = easyMockSupport.createNiceMock(Cluster.class);
+    expect(mockClusters.getCluster("123")).andReturn(clusterMock);
+
+    expect(clusterMock.getClusterId()).andReturn(123L).once();
+
+    ClusterConfigEntity clusterConfigEntity1 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
+    ClusterConfigEntity clusterConfigEntity2 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
+    expect(clusterConfigEntity1.getType()).andReturn("type1").anyTimes();
+    expect(clusterConfigEntity1.getSelectedTimestamp()).andReturn(123L);
+    clusterConfigEntity1.setSelected(false);
+    expectLastCall().once();
+
+    expect(clusterConfigEntity2.getType()).andReturn("type1").anyTimes();
+    expect(clusterConfigEntity2.getSelectedTimestamp()).andReturn(321L);
+    clusterConfigEntity2.setSelected(false);
+    expectLastCall().once();
+    clusterConfigEntity2.setSelected(true);
+    expectLastCall().once();
+
+    TypedQuery queryMock = easyMockSupport.createNiceMock(TypedQuery.class);
+    expect(mockEntityManager.createNamedQuery(anyString(), anyObject(Class.class))).andReturn(queryMock).anyTimes();
+    expect(queryMock.setParameter(anyString(), anyString())).andReturn(queryMock).once();
+    expect(queryMock.setParameter(anyString(), anyLong())).andReturn(queryMock).once();
+    expect(queryMock.getResultList()).andReturn(Arrays.asList(clusterConfigEntity1, clusterConfigEntity2)).once();
+    expect(clusterDAO.merge(anyObject(ClusterConfigEntity.class), anyBoolean())).andReturn(null).times(3);
+
+    DatabaseConsistencyCheckHelper.setInjector(mockInjector);
+    DatabaseConsistencyCheckHelper.setConnection(mockConnection);
+
+    easyMockSupport.replayAll();
+
+    DatabaseConsistencyCheckHelper.fixConfigsSelectedMoreThanOnce();
+
+    easyMockSupport.verifyAll();
+  }
 }
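
The mock choreography in testFixConfigsSelectedMoreThanOnce encodes the repair rule: when more than one config of a type is selected, every candidate is deselected and only the one with the greatest selected timestamp is re-selected (the entity stamped 321 wins over 123, hence three merge() calls). A reduced sketch of that rule, with the loop shape assumed for illustration:

    // Keep-latest-selected repair rule the test verifies (loop shape assumed).
    ClusterConfigEntity latest = null;
    for (ClusterConfigEntity entity : configsOfOneType) {
      entity.setSelected(false);                 // deselect every candidate first
      clusterDAO.merge(entity, true);
      if (latest == null || entity.getSelectedTimestamp() > latest.getSelectedTimestamp()) {
        latest = entity;
      }
    }
    if (latest != null) {
      latest.setSelected(true);                  // re-select the newest
      clusterDAO.merge(latest, true);
    }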

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/test/java/org/apache/ambari/server/checks/DruidHighAvailabilityCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/DruidHighAvailabilityCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/DruidHighAvailabilityCheckTest.java
new file mode 100644
index 0000000..d88c9a1
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/DruidHighAvailabilityCheckTest.java
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.checks;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.repository.ClusterVersionSummary;
+import org.apache.ambari.server.state.repository.VersionDefinitionXml;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.inject.Provider;
+
+/**
+ * Unit tests for DruidHighAvailabilityCheck.
+ *
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class DruidHighAvailabilityCheckTest
+{
+  private final Clusters clusters = Mockito.mock(Clusters.class);
+
+  private final DruidHighAvailabilityCheck druidHighAvailabilityCheck = new DruidHighAvailabilityCheck();
+
+  @Mock
+  private ClusterVersionSummary m_clusterVersionSummary;
+
+  @Mock
+  private VersionDefinitionXml m_vdfXml;
+
+  @Mock
+  private RepositoryVersionEntity m_repositoryVersion;
+
+  final Map<String, Service> m_services = new HashMap<>();
+
+  @Before
+  public void setup() throws Exception {
+    druidHighAvailabilityCheck.clustersProvider = new Provider<Clusters>() {
+      @Override
+      public Clusters get() {
+        return clusters;
+      }
+    };
+
+    druidHighAvailabilityCheck.ambariMetaInfo = new Provider<AmbariMetaInfo>() {
+      @Override
+      public AmbariMetaInfo get() {
+        return Mockito.mock(AmbariMetaInfo.class);
+      }
+    };
+
+    Configuration config = Mockito.mock(Configuration.class);
+    druidHighAvailabilityCheck.config = config;
+
+    m_services.clear();
+    Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml);
+    Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary);
+    Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet());
+  }
+
+  @Test
+  public void testIsApplicable() throws Exception {
+    final Cluster cluster = Mockito.mock(Cluster.class);
+    final Service service = Mockito.mock(Service.class);
+
+    m_services.put("DRUID", service);
+
+    Mockito.when(cluster.getClusterId()).thenReturn(1L);
+    Mockito.when(cluster.getServices()).thenReturn(m_services);
+    Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster);
+
+    PrereqCheckRequest request = new PrereqCheckRequest("cluster");
+    request.setTargetRepositoryVersion(m_repositoryVersion);
+
+    Assert.assertTrue(druidHighAvailabilityCheck.isApplicable(request));
+
+    request = new PrereqCheckRequest("cluster");
+    request.setTargetRepositoryVersion(m_repositoryVersion);
+
+    request.addResult(CheckDescription.DRUID_HA_WARNING, PrereqCheckStatus.PASS);
+    Assert.assertTrue(druidHighAvailabilityCheck.isApplicable(request));
+
+    m_services.remove("DRUID");
+    Assert.assertFalse(druidHighAvailabilityCheck.isApplicable(request));
+  }
+
+  @Test
+  public void testPerform() throws Exception {
+    final ServiceComponentHost serviceComponentHost = Mockito.mock(ServiceComponentHost.class);
+    final Cluster cluster = Mockito.mock(Cluster.class);
+    Mockito.when(cluster.getClusterId()).thenReturn(1L);
+    Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster);
+
+    final Service service = Mockito.mock(Service.class);
+    final ServiceComponent serviceComponent = Mockito.mock(ServiceComponent.class);
+    final ServiceComponent haComponent = Mockito.mock(ServiceComponent.class);
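+    // A single-host component represents a non-HA deployment; two hosts represent HA.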
+    Mockito.when(serviceComponent.getServiceComponentHosts()).thenReturn(Collections.singletonMap("host", null));
+    Mockito.when(haComponent.getServiceComponentHosts()).thenReturn(ImmutableMap.<String,ServiceComponentHost>of("host1", serviceComponentHost, "host2", serviceComponentHost));
+
+    // All Components Not HA
+    Mockito.when(cluster.getService("DRUID")).thenReturn(service);
+    Mockito.when(service.getServiceComponent("DRUID_COORDINATOR")).thenReturn(serviceComponent);
+    Mockito.when(service.getServiceComponent("DRUID_BROKER")).thenReturn(serviceComponent);
+    Mockito.when(service.getServiceComponent("DRUID_MIDDLEMANAGER")).thenReturn(serviceComponent);
+    Mockito.when(service.getServiceComponent("DRUID_HISTORICAL")).thenReturn(serviceComponent);
+    Mockito.when(service.getServiceComponent("DRUID_OVERLORD")).thenReturn(serviceComponent);
+    Mockito.when(service.getServiceComponent("DRUID_ROUTER")).thenReturn(serviceComponent);
+    PrerequisiteCheck check = new PrerequisiteCheck(null, null);
+    druidHighAvailabilityCheck.perform(check, new PrereqCheckRequest("cluster"));
+    Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus());
+    Assert.assertEquals("DRUID", check.getFailedOn().toArray(new String[1])[0]);
+    Assert.assertEquals("High Availability is not enabled for Druid. Druid Service may have some downtime during upgrade. Deploy multiple instances of DRUID_BROKER, DRUID_COORDINATOR, DRUID_HISTORICAL, DRUID_OVERLORD, DRUID_MIDDLEMANAGER, DRUID_ROUTER in the Cluster to avoid any downtime.", check.getFailReason());
+
+    // Some Components have HA
+    Mockito.when(cluster.getService("DRUID")).thenReturn(service);
+    Mockito.when(service.getServiceComponent("DRUID_COORDINATOR")).thenReturn(serviceComponent);
+    Mockito.when(service.getServiceComponent("DRUID_BROKER")).thenReturn(haComponent);
+    Mockito.when(service.getServiceComponent("DRUID_MIDDLEMANAGER")).thenReturn(serviceComponent);
+    Mockito.when(service.getServiceComponent("DRUID_HISTORICAL")).thenReturn(haComponent);
+    Mockito.when(service.getServiceComponent("DRUID_OVERLORD")).thenReturn(serviceComponent);
+    Mockito.when(service.getServiceComponent("DRUID_ROUTER")).thenReturn(haComponent);
+    check = new PrerequisiteCheck(null, null);
+    druidHighAvailabilityCheck.perform(check, new PrereqCheckRequest("cluster"));
+    Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus());
+    Assert.assertEquals("DRUID", check.getFailedOn().toArray(new String[1])[0]);
+    Assert.assertEquals("High Availability is not enabled for Druid. Druid Service may have some downtime during upgrade. Deploy multiple instances of DRUID_COORDINATOR, DRUID_OVERLORD, DRUID_MIDDLEMANAGER in the Cluster to avoid any downtime.", check.getFailReason());
+
+    // All components have HA
+    Mockito.when(cluster.getService("DRUID")).thenReturn(service);
+    Mockito.when(service.getServiceComponent("DRUID_COORDINATOR")).thenReturn(haComponent);
+    Mockito.when(service.getServiceComponent("DRUID_BROKER")).thenReturn(haComponent);
+    Mockito.when(service.getServiceComponent("DRUID_MIDDLEMANAGER")).thenReturn(haComponent);
+    Mockito.when(service.getServiceComponent("DRUID_HISTORICAL")).thenReturn(haComponent);
+    Mockito.when(service.getServiceComponent("DRUID_OVERLORD")).thenReturn(haComponent);
+    Mockito.when(service.getServiceComponent("DRUID_ROUTER")).thenReturn(haComponent);
+
+    check = new PrerequisiteCheck(null, null);
+    druidHighAvailabilityCheck.perform(check, new PrereqCheckRequest("cluster"));
+    Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus());
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
index 55eeb4e..ff585fc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
@@ -28,14 +28,12 @@ import java.util.Collections;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
+import org.apache.ambari.server.orm.dao.HostRoleCommandDAO.LastServiceCheckDTO;
 import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
-import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
 import org.apache.ambari.server.orm.entities.ServiceConfigEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
@@ -58,7 +56,6 @@ public class ServiceCheckValidityCheckTest {
   private static final long CLUSTER_ID = 1L;
   private static final String SERVICE_NAME = "HDFS";
   private static final long CONFIG_CREATE_TIMESTAMP = 1461518722202L;
-  private static final String COMMAND_DETAIL = "HDFS service check";
   private static final long SERVICE_CHECK_START_TIME = CONFIG_CREATE_TIMESTAMP - 2000L;
   private static final String SERVICE_COMPONENT_NAME = "service component";
   private ServiceCheckValidityCheck serviceCheckValidityCheck;
@@ -67,6 +64,7 @@ public class ServiceCheckValidityCheckTest {
   private HostRoleCommandDAO hostRoleCommandDAO;
   private Service service;
   private AmbariMetaInfo ambariMetaInfo;
+  private ActionMetadata actionMetadata;
 
   @Before
   public void setUp() throws Exception {
@@ -75,6 +73,7 @@ public class ServiceCheckValidityCheckTest {
     serviceConfigDAO = mock(ServiceConfigDAO.class);
     hostRoleCommandDAO = mock(HostRoleCommandDAO.class);
     ambariMetaInfo = mock(AmbariMetaInfo.class);
+    actionMetadata = new ActionMetadata();
 
     serviceCheckValidityCheck = new ServiceCheckValidityCheck();
     serviceCheckValidityCheck.hostRoleCommandDAOProvider = new Provider<HostRoleCommandDAO>() {
@@ -95,6 +94,12 @@ public class ServiceCheckValidityCheckTest {
         return clusters;
       }
     };
+    serviceCheckValidityCheck.actionMetadataProvider = new Provider<ActionMetadata>() {
+      @Override
+      public ActionMetadata get() {
+        return actionMetadata;
+      }
+    };
 
     Cluster cluster = mock(Cluster.class);
     when(clusters.getCluster(CLUSTER_NAME)).thenReturn(cluster);
@@ -114,6 +119,8 @@ public class ServiceCheckValidityCheckTest {
 
     when(ambariMetaInfo.isServiceWithNoConfigs(Mockito.anyString(), Mockito.anyString(),
         Mockito.anyString())).thenReturn(false);
+
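+    // Register HDFS so its HDFS_SERVICE_CHECK role can be resolved back to the service.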
+    actionMetadata.addServiceCheckAction("HDFS");
   }
 
   @Test
@@ -128,20 +135,11 @@ public class ServiceCheckValidityCheckTest {
     serviceConfigEntity.setServiceName(SERVICE_NAME);
     serviceConfigEntity.setCreateTimestamp(CONFIG_CREATE_TIMESTAMP);
 
-    HostRoleCommandEntity hostRoleCommandEntity1 = new HostRoleCommandEntity();
-    hostRoleCommandEntity1.setRoleCommand(RoleCommand.SERVICE_CHECK);
-    hostRoleCommandEntity1.setCommandDetail(null);
-    hostRoleCommandEntity1.setStartTime(SERVICE_CHECK_START_TIME);
-    hostRoleCommandEntity1.setRole(Role.ZOOKEEPER_SERVER);
-
-    HostRoleCommandEntity hostRoleCommandEntity2 = new HostRoleCommandEntity();
-    hostRoleCommandEntity2.setRoleCommand(RoleCommand.SERVICE_CHECK);
-    hostRoleCommandEntity2.setCommandDetail(COMMAND_DETAIL);
-    hostRoleCommandEntity2.setStartTime(SERVICE_CHECK_START_TIME);
-    hostRoleCommandEntity2.setRole(Role.HDFS_SERVICE_CHECK);
+    LastServiceCheckDTO lastServiceCheckDTO1 = new LastServiceCheckDTO(Role.ZOOKEEPER_QUORUM_SERVICE_CHECK.name(), SERVICE_CHECK_START_TIME);
+    LastServiceCheckDTO lastServiceCheckDTO2 = new LastServiceCheckDTO(Role.HDFS_SERVICE_CHECK.name(), SERVICE_CHECK_START_TIME);
 
     when(serviceConfigDAO.getLastServiceConfig(eq(CLUSTER_ID), eq(SERVICE_NAME))).thenReturn(serviceConfigEntity);
-    when(hostRoleCommandDAO.findAll(any(Request.class), any(Predicate.class))).thenReturn(asList(hostRoleCommandEntity1, hostRoleCommandEntity2));
+    when(hostRoleCommandDAO.getLatestServiceChecksByRole(any(Long.class))).thenReturn(asList(lastServiceCheckDTO1, lastServiceCheckDTO2));
 
     PrerequisiteCheck check = new PrerequisiteCheck(null, CLUSTER_NAME);
     try {
@@ -164,14 +162,10 @@ public class ServiceCheckValidityCheckTest {
     serviceConfigEntity.setServiceName(SERVICE_NAME);
     serviceConfigEntity.setCreateTimestamp(CONFIG_CREATE_TIMESTAMP);
 
-    HostRoleCommandEntity hostRoleCommandEntity = new HostRoleCommandEntity();
-    hostRoleCommandEntity.setRoleCommand(RoleCommand.SERVICE_CHECK);
-    hostRoleCommandEntity.setCommandDetail(COMMAND_DETAIL);
-    hostRoleCommandEntity.setStartTime(SERVICE_CHECK_START_TIME);
-    hostRoleCommandEntity.setRole(Role.HDFS_SERVICE_CHECK);
+    LastServiceCheckDTO lastServiceCheckDTO = new LastServiceCheckDTO(Role.HDFS_SERVICE_CHECK.name(), SERVICE_CHECK_START_TIME);
 
     when(serviceConfigDAO.getLastServiceConfig(eq(CLUSTER_ID), eq(SERVICE_NAME))).thenReturn(serviceConfigEntity);
-    when(hostRoleCommandDAO.findAll(any(Request.class), any(Predicate.class))).thenReturn(singletonList(hostRoleCommandEntity));
+    when(hostRoleCommandDAO.getLatestServiceChecksByRole(any(Long.class))).thenReturn(singletonList(lastServiceCheckDTO));
 
     PrerequisiteCheck check = new PrerequisiteCheck(null, CLUSTER_NAME);
     serviceCheckValidityCheck.perform(check, new PrereqCheckRequest(CLUSTER_NAME));
@@ -192,7 +186,7 @@ public class ServiceCheckValidityCheckTest {
     serviceConfigEntity.setCreateTimestamp(CONFIG_CREATE_TIMESTAMP);
 
     when(serviceConfigDAO.getLastServiceConfig(eq(CLUSTER_ID), eq(SERVICE_NAME))).thenReturn(serviceConfigEntity);
-    when(hostRoleCommandDAO.findAll(any(Request.class), any(Predicate.class))).thenReturn(Collections.emptyList());
+    when(hostRoleCommandDAO.getLatestServiceChecksByRole(any(Long.class))).thenReturn(Collections.<LastServiceCheckDTO>emptyList());
 
     PrerequisiteCheck check = new PrerequisiteCheck(null, CLUSTER_NAME);
     serviceCheckValidityCheck.perform(check, new PrereqCheckRequest(CLUSTER_NAME));
@@ -211,23 +205,49 @@ public class ServiceCheckValidityCheckTest {
     serviceConfigEntity.setServiceName(SERVICE_NAME);
     serviceConfigEntity.setCreateTimestamp(CONFIG_CREATE_TIMESTAMP);
 
-    HostRoleCommandEntity hostRoleCommandEntity1 = new HostRoleCommandEntity();
-    hostRoleCommandEntity1.setRoleCommand(RoleCommand.SERVICE_CHECK);
-    hostRoleCommandEntity1.setCommandDetail(COMMAND_DETAIL);
-    hostRoleCommandEntity1.setStartTime(SERVICE_CHECK_START_TIME);
-    hostRoleCommandEntity1.setRole(Role.HDFS_SERVICE_CHECK);
-
-    HostRoleCommandEntity hostRoleCommandEntity2 = new HostRoleCommandEntity();
-    hostRoleCommandEntity2.setRoleCommand(RoleCommand.SERVICE_CHECK);
-    hostRoleCommandEntity2.setCommandDetail(COMMAND_DETAIL);
-    hostRoleCommandEntity2.setStartTime(CONFIG_CREATE_TIMESTAMP - 1L);
-    hostRoleCommandEntity2.setRole(Role.HDFS_SERVICE_CHECK);
+    LastServiceCheckDTO lastServiceCheckDTO1 = new LastServiceCheckDTO(Role.HDFS_SERVICE_CHECK.name(), SERVICE_CHECK_START_TIME);
+    LastServiceCheckDTO lastServiceCheckDTO2 = new LastServiceCheckDTO(Role.HDFS_SERVICE_CHECK.name(), CONFIG_CREATE_TIMESTAMP - 1L);
 
     when(serviceConfigDAO.getLastServiceConfig(eq(CLUSTER_ID), eq(SERVICE_NAME))).thenReturn(serviceConfigEntity);
-    when(hostRoleCommandDAO.findAll(any(Request.class), any(Predicate.class))).thenReturn(asList(hostRoleCommandEntity1, hostRoleCommandEntity2));
+    when(hostRoleCommandDAO.getLatestServiceChecksByRole(any(Long.class))).thenReturn(asList(lastServiceCheckDTO1, lastServiceCheckDTO2));
 
     PrerequisiteCheck check = new PrerequisiteCheck(null, CLUSTER_NAME);
     serviceCheckValidityCheck.perform(check, new PrereqCheckRequest(CLUSTER_NAME));
     Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
   }
+
+  /**
+   * Tests that old, outdated service checks for the FOO2 service don't cause
+   * problems when checking values for the FOO service.
+   * <p/>
+   * The specific test case here is that the FOO2 service was added a long time
+   * ago and then removed. We don't want old service checks for FOO2 to match
+   * when querying for FOO.
+   *
+   * @throws AmbariException
+   */
+  @Test
+  public void testPassWhenSimilarlyNamedServiceIsOutdated() throws AmbariException {
+    ServiceComponent serviceComponent = mock(ServiceComponent.class);
+    when(serviceComponent.isVersionAdvertised()).thenReturn(true);
+
+    when(service.getMaintenanceState()).thenReturn(MaintenanceState.OFF);
+    when(service.getServiceComponents()).thenReturn(ImmutableMap.of(SERVICE_COMPONENT_NAME, serviceComponent));
+
+    ServiceConfigEntity serviceConfigEntity = new ServiceConfigEntity();
+    serviceConfigEntity.setServiceName(SERVICE_NAME);
+    serviceConfigEntity.setCreateTimestamp(CONFIG_CREATE_TIMESTAMP);
+
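+    // Derive the role name of the similarly named HDFS2 service from the HDFS one.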
+    String hdfsRole = Role.HDFS_SERVICE_CHECK.name();
+    String hdfs2Role = hdfsRole.replace("HDFS", "HDFS2");
+
+    LastServiceCheckDTO lastServiceCheckDTO1 = new LastServiceCheckDTO(hdfsRole, SERVICE_CHECK_START_TIME);
+    LastServiceCheckDTO lastServiceCheckDTO2 = new LastServiceCheckDTO(hdfs2Role, CONFIG_CREATE_TIMESTAMP - 1L);
+
+    when(serviceConfigDAO.getLastServiceConfig(eq(CLUSTER_ID), eq(SERVICE_NAME))).thenReturn(serviceConfigEntity);
+    when(hostRoleCommandDAO.getLatestServiceChecksByRole(any(Long.class))).thenReturn(asList(lastServiceCheckDTO1, lastServiceCheckDTO2));
+
+    PrerequisiteCheck check = new PrerequisiteCheck(null, CLUSTER_NAME);
+    serviceCheckValidityCheck.perform(check, new PrereqCheckRequest(CLUSTER_NAME));
+    Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 9309abe..b370829 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -6977,7 +6977,7 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals(1, responsesWithParams.size());
     StackVersionResponse resp = responsesWithParams.iterator().next();
     assertNotNull(resp.getUpgradePacks());
-    assertEquals(15, resp.getUpgradePacks().size());
+    assertTrue(resp.getUpgradePacks().size() > 0);
     assertTrue(resp.getUpgradePacks().contains("upgrade_test"));
   }
 
@@ -9387,6 +9387,7 @@ public class AmbariManagementControllerTest {
     List<Long> requestIDs = actionDB.getRequestsByStatus(null, 1, false);
     Request request = actionDB.getRequest(requestIDs.get(0));
     assertEquals("Update Include/Exclude Files for [HDFS]", request.getRequestContext());
+    assertFalse(request.isExclusive());
     Type type = new TypeToken<Map<String, String>>(){}.getType();
     Map<String, String> requestParams = StageUtils.getGson().fromJson(request.getInputs(), type);
     assertEquals(2, requestParams.size());
@@ -10422,6 +10423,17 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals("FILES_LOCAL", layoutUserWidgetEntities.get(2).getWidget().getWidgetName());
     Assert.assertEquals("UPDATED_BLOCKED_TIME", layoutUserWidgetEntities.get(3).getWidget().getWidgetName());
     Assert.assertEquals("HBASE_SUMMARY", layoutUserWidgetEntities.get(0).getWidget().getDefaultSectionName());
+
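+    // The default_system_heatmap layout should also exist, authored by ambari at CLUSTER scope.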
+    candidateLayoutEntity = null;
+    for (WidgetLayoutEntity entity : layoutEntities) {
+      if (entity.getLayoutName().equals("default_system_heatmap")) {
+        candidateLayoutEntity = entity;
+        break;
+      }
+    }
+    Assert.assertNotNull(candidateLayoutEntity);
+    Assert.assertEquals("ambari", candidateVisibleEntity.getAuthor());
+    Assert.assertEquals("CLUSTER", candidateVisibleEntity.getScope());
   }
 
   // this is a temporary measure as a result of moving updateHostComponents from AmbariManagementController

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ae98dbe/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 734dd7e..68d6349 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -4997,6 +4997,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
   @Test
   public void testAtlas() throws Exception {
     final String expectedHostGroupName = "host_group_1";
+    final String zkHostGroupName = "zk_host_group";
     final String host1 = "c6401.ambari.apache.org";
     final String host2 = "c6402.ambari.apache.org";
     final String host3 = "c6403.ambari.apache.org";
@@ -5015,18 +5016,24 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
     Configuration clusterConfig = new Configuration(properties, Collections.emptyMap());
 
-    Collection<String> hgComponents = new HashSet<>();
-    hgComponents.add("KAFKA_BROKER");
-    hgComponents.add("ZOOKEEPER_SERVER");
-    hgComponents.add("HBASE_MASTER");
+    Collection<String> hg1Components = new HashSet<>();
+    hg1Components.add("KAFKA_BROKER");
+    hg1Components.add("HBASE_MASTER");
     List<String> hosts = new ArrayList<>();
     hosts.add(host1);
     hosts.add(host2);
-    hosts.add(host3);
-    TestHostGroup group1 = new TestHostGroup(expectedHostGroupName, hgComponents, hosts);
+    TestHostGroup group1 = new TestHostGroup(expectedHostGroupName, hg1Components, hosts);
+
+    // Place ZOOKEEPER_SERVER in separate host group/host other
+    // than ATLAS
+    Collection<String> zkHostGroupComponents = new HashSet<>();
+    zkHostGroupComponents.add("ZOOKEEPER_SERVER");
+
+    TestHostGroup group2 = new TestHostGroup(zkHostGroupName, zkHostGroupComponents, Collections.singletonList(host3));
 
     Collection<TestHostGroup> hostGroups = new HashSet<>();
     hostGroups.add(group1);
+    hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
     BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
@@ -5037,29 +5044,29 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     List<String> hostArray =
       Arrays.asList(atlasProperties.get("atlas.kafka.bootstrap.servers").split(","));
     List<String> expected =
-      Arrays.asList("c6401.ambari.apache.org:6667", "c6402.ambari.apache.org:6667", "c6403.ambari.apache.org:6667");
+      Arrays.asList("c6401.ambari.apache.org:6667", "c6402.ambari.apache.org:6667");
 
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
 
     hostArray = Arrays.asList(atlasProperties.get("atlas.kafka.zookeeper.connect").split(","));
     expected =
-      Arrays.asList("c6401.ambari.apache.org:2181", "c6402.ambari.apache.org:2181", "c6403.ambari.apache.org:2181");
+      Arrays.asList("c6403.ambari.apache.org:2181");
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
 
 
     hostArray = Arrays.asList(atlasProperties.get("atlas.graph.index.search.solr.zookeeper-url").split(","));
     expected =
-      Arrays.asList("c6401.ambari.apache.org:2181/ambari-solr", "c6402.ambari.apache.org:2181/ambari-solr", "c6403.ambari.apache.org:2181/ambari-solr");
+      Arrays.asList("c6403.ambari.apache.org:2181/ambari-solr");
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
 
     hostArray = Arrays.asList(atlasProperties.get("atlas.graph.storage.hostname").split(","));
     expected =
-      Arrays.asList("c6401.ambari.apache.org", "c6402.ambari.apache.org", "c6403.ambari.apache.org");
+      Arrays.asList("c6403.ambari.apache.org");
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
 
     hostArray = Arrays.asList(atlasProperties.get("atlas.audit.hbase.zookeeper.quorum").split(","));
     expected =
-      Arrays.asList("c6401.ambari.apache.org", "c6402.ambari.apache.org", "c6403.ambari.apache.org");
+      Arrays.asList("c6403.ambari.apache.org");
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
   }