Posted to commits@ambari.apache.org by ao...@apache.org on 2017/09/28 13:25:33 UTC

[44/50] [abbrv] ambari git commit: Merge remote-tracking branch 'remotes/origin/trunk' into branch-3.0-perf

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 5e0d707,eb39562..67f23ac
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@@ -165,17 -200,100 +200,100 @@@ public class AmbariContext 
      return getController().getActionManager().getTasks(ids);
    }
  
-   public void createAmbariResources(ClusterTopology topology, String clusterName, SecurityType securityType, String repoVersion) {
+   public void createAmbariResources(ClusterTopology topology, String clusterName, SecurityType securityType,
+                                     String repoVersionString, Long repoVersionId) {
      Stack stack = topology.getBlueprint().getStack();
+     StackId stackId = new StackId(stack.getName(), stack.getVersion());
+ 
+     RepositoryVersionEntity repoVersion = null;
 -    if (StringUtils.isEmpty(repoVersionString) && null == repoVersionId) {
++    if (null == repoVersionString && null == repoVersionId) {
+       List<RepositoryVersionEntity> stackRepoVersions = repositoryVersionDAO.findByStack(stackId);
+ 
+       if (stackRepoVersions.isEmpty()) {
+         // !!! no repos, try to get the version for the stack
+         VersionDefinitionResourceProvider vdfProvider = getVersionDefinitionResourceProvider();
+ 
+         Map<String, Object> properties = new HashMap<>();
+         properties.put(VersionDefinitionResourceProvider.VERSION_DEF_AVAILABLE_DEFINITION, stackId.toString());
+ 
+         Request request = new RequestImpl(Collections.<String>emptySet(),
+             Collections.singleton(properties), Collections.<String, String>emptyMap(), null);
+ 
+         Long defaultRepoVersionId = null;
+ 
+         try {
+           RequestStatus requestStatus = vdfProvider.createResources(request);
+           if (!requestStatus.getAssociatedResources().isEmpty()) {
+             Resource resource = requestStatus.getAssociatedResources().iterator().next();
+             defaultRepoVersionId = (Long) resource.getPropertyValue(VersionDefinitionResourceProvider.VERSION_DEF_ID);
+           }
+         } catch (Exception e) {
+           throw new IllegalArgumentException(String.format(
+               "Failed to create a default repository version definition for stack %s. "
+               + "This typically is a result of not loading the stack correctly or being able "
+               + "to load information about released versions.  Create a repository version "
+               + " and try again.", stackId), e);
+         }
+ 
+         repoVersion = repositoryVersionDAO.findByPK(defaultRepoVersionId);
+         // !!! the default repo version just created must exist at this point
+         if (null == repoVersion) {
+           throw new IllegalArgumentException(String.format(
+               "Failed to load the default repository version definition for stack %s. "
+               + "Check for a valid repository version and try again.", stackId));
+         }
+ 
+       } else if (stackRepoVersions.size() > 1) {
+ 
+         Function<RepositoryVersionEntity, String> function = new Function<RepositoryVersionEntity, String>() {
+           @Override
+           public String apply(RepositoryVersionEntity input) {
+             return input.getVersion();
+           }
+         };
+ 
+         Collection<String> versions = Collections2.transform(stackRepoVersions, function);
  
-     createAmbariClusterResource(clusterName, stack.getName(), stack.getVersion(), securityType, repoVersion);
-     createAmbariServiceAndComponentResources(topology, clusterName);
+         throw new IllegalArgumentException(String.format("Several repositories were found for %s: %s. Specify the version"
+             + " with '%s'", stackId, StringUtils.join(versions, ", "), ProvisionClusterRequest.REPO_VERSION_PROPERTY));
+       } else {
+         repoVersion = stackRepoVersions.get(0);
+         LOG.warn("Cluster is being provisioned using the single matching repository version {}", repoVersion.getVersion());
+       }
+     } else if (null != repoVersionId){
+       repoVersion = repositoryVersionDAO.findByPK(repoVersionId);
+ 
+       if (null == repoVersion) {
+         throw new IllegalArgumentException(String.format(
+           "Could not identify repository version with repository version id %s for installing services. "
+             + "Specify a valid repository version id with '%s'",
+           repoVersionId, ProvisionClusterRequest.REPO_VERSION_ID_PROPERTY));
+       }
+     } else {
+       repoVersion = repositoryVersionDAO.findByStackAndVersion(stackId, repoVersionString);
+ 
+       if (null == repoVersion) {
+         throw new IllegalArgumentException(String.format(
+           "Could not identify repository version with stack %s and version %s for installing services. "
+             + "Specify a valid version with '%s'",
+           stackId, repoVersionString, ProvisionClusterRequest.REPO_VERSION_PROPERTY));
+       }
+     }
+ 
+     // only use a STANDARD repo when creating a new cluster
+     if (repoVersion.getType() != RepositoryType.STANDARD) {
+       throw new IllegalArgumentException(String.format(
+           "Unable to create a cluster using the following repository since it is not a STANDARD type: %s",
+           repoVersion));
+     }
+ 
+     createAmbariClusterResource(clusterName, stack.getName(), stack.getVersion(), securityType);
+     createAmbariServiceAndComponentResources(topology, clusterName, stackId, repoVersion.getId());
    }
  
-   public void createAmbariClusterResource(String clusterName, String stackName, String stackVersion, SecurityType securityType, String repoVersion) {
+   public void createAmbariClusterResource(String clusterName, String stackName, String stackVersion, SecurityType securityType) {
      String stackInfo = String.format("%s-%s", stackName, stackVersion);
      final ClusterRequest clusterRequest = new ClusterRequest(null, clusterName, null, securityType, stackInfo, null);
-     clusterRequest.setRepositoryVersion(repoVersion);
  
      try {
        RetryHelper.executeWithRetry(new Callable<Object>() {
@@@ -702,4 -840,14 +840,14 @@@
      }
      return componentResourceProvider;
    }
+ 
+   private synchronized VersionDefinitionResourceProvider getVersionDefinitionResourceProvider() {
+     if (versionDefinitionResourceProvider == null) {
+       versionDefinitionResourceProvider = (VersionDefinitionResourceProvider) ClusterControllerHelper.
+           getClusterController().ensureResourceProvider(Resource.Type.VersionDefinition);
+     }
+     return versionDefinitionResourceProvider;
+   }
+ 
 -}
 +}

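For reference, the repository version resolution that the new createAmbariResources()
performs can be summarized as: an explicit repository version id wins, then an explicit
version string, then the stack's single registered repository; with several candidates the
request is rejected, and with none a default version definition is created. A minimal
standalone sketch of that precedence, using illustrative stand-in types (RepoVersion and
this class are not Ambari classes):

import java.util.List;

// Hypothetical sketch of the precedence above; not the actual AmbariContext code.
final class RepoVersionPrecedence {

  record RepoVersion(long id, String version) { }

  static RepoVersion resolve(Long repoVersionId, String repoVersionString,
                             List<RepoVersion> stackRepos) {
    if (repoVersionId != null) {
      // 1. An explicit id must match a known repository version.
      return stackRepos.stream()
          .filter(r -> r.id() == repoVersionId)
          .findFirst()
          .orElseThrow(() -> new IllegalArgumentException(
              "No repository version with id " + repoVersionId));
    }
    if (repoVersionString != null) {
      // 2. An explicit version string must match a known version.
      return stackRepos.stream()
          .filter(r -> r.version().equals(repoVersionString))
          .findFirst()
          .orElseThrow(() -> new IllegalArgumentException(
              "No repository version " + repoVersionString));
    }
    // 3. Nothing specified: exactly one registered repository is acceptable.
    if (stackRepos.size() == 1) {
      return stackRepos.get(0);
    }
    if (stackRepos.size() > 1) {
      throw new IllegalArgumentException(
          "Several repository versions found; specify one explicitly");
    }
    // 4. None registered: the real code creates a default version definition
    //    through the VersionDefinitionResourceProvider instead of failing.
    throw new IllegalArgumentException("No repository versions registered for the stack");
  }
}
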
http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
index 643945c,9769fae..0863e37
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
@@@ -29,12 -29,11 +29,10 @@@ import java.util.List
  import java.util.Map;
  import java.util.Set;
  import java.util.concurrent.Callable;
 +import java.util.concurrent.Executor;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
 -import java.util.concurrent.LinkedBlockingQueue;
 -import java.util.concurrent.TimeUnit;
  
- import javax.inject.Inject;
- 
  import org.apache.ambari.server.AmbariException;
  import org.apache.ambari.server.actionmanager.HostRoleCommand;
  import org.apache.ambari.server.actionmanager.HostRoleStatus;
@@@ -80,6 -80,7 +78,8 @@@ import org.slf4j.Logger
  import org.slf4j.LoggerFactory;
  
  import com.google.common.eventbus.Subscribe;
+ import com.google.inject.Inject;
++import com.google.inject.Injector;
  import com.google.inject.Singleton;
  import com.google.inject.persist.Transactional;
  
@@@ -116,6 -131,6 +116,9 @@@ public class TopologyManager 
    private Map<Long, ClusterTopology> clusterTopologyMap = new HashMap<>();
  
    @Inject
++  private Injector injector;
++
++  @Inject
    private StackAdvisorBlueprintProcessor stackAdvisorBlueprintProcessor;
  
    @Inject
@@@ -273,6 -287,6 +277,10 @@@
      SecurityType securityType = null;
      Credential credential = null;
  
++    if (null == repoVersion && null == repoVersionID) {
++      throw new AmbariException("A repository version must be created and its version or id passed in the request.");
++    }
++
      SecurityConfiguration securityConfiguration = processSecurityConfiguration(request);
  
      if (securityConfiguration != null && securityConfiguration.getType() == SecurityType.KERBEROS) {
@@@ -324,16 -343,6 +333,15 @@@
      addClusterConfigRequest(topology, new ClusterConfigurationRequest(ambariContext, topology, true,
        stackAdvisorBlueprintProcessor, securityType == SecurityType.KERBEROS));
  
- 
 +    // Notify listeners that cluster configuration finished
 +    executor.submit(new Callable<Boolean>() {
 +      @Override
 +      public Boolean call() throws Exception {
 +        ambariEventPublisher.publish(new ClusterConfigFinishedEvent(clusterName));
 +        return Boolean.TRUE;
 +      }
 +    });
 +
      // Process the logical request
      processRequest(request, topology, logicalRequest);
  

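The new block submitted to the executor above is a fire-and-forget notification: the
ClusterConfigFinishedEvent is published from a pool thread so cluster provisioning does
not block on event listeners. A minimal sketch of the pattern, with a stand-in Publisher
interface instead of Ambari's event publisher:

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Illustrative only; Publisher and the string event stand in for Ambari types.
final class AsyncPublishSketch {
  interface Publisher { void publish(Object event); }

  static void notifyConfigFinished(ExecutorService executor,
                                   Publisher publisher, String clusterName) {
    executor.submit((Callable<Boolean>) () -> {
      // Runs on a pool thread, off the provisioning code path.
      publisher.publish("ClusterConfigFinishedEvent:" + clusterName);
      return Boolean.TRUE;
    });
  }

  public static void main(String[] args) {
    ExecutorService executor = Executors.newSingleThreadExecutor();
    notifyConfigFinished(executor, System.out::println, "c1");
    executor.shutdown();
  }
}
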
http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/ConfigureClusterTask.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/ConfigureClusterTaskFactory.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
index 1f1c647,a2e2f6f..83707ed
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
@@@ -42,9 -42,9 +42,11 @@@ import javax.persistence.EntityManager
  import javax.xml.parsers.DocumentBuilder;
  import javax.xml.parsers.DocumentBuilderFactory;
  
+ import org.apache.ambari.annotations.Experimental;
+ import org.apache.ambari.annotations.ExperimentalFeature;
  import org.apache.ambari.server.AmbariException;
 +import org.apache.ambari.server.agent.stomp.AgentConfigsHolder;
 +import org.apache.ambari.server.agent.stomp.MetadataHolder;
  import org.apache.ambari.server.api.services.AmbariMetaInfo;
  import org.apache.ambari.server.configuration.Configuration;
  import org.apache.ambari.server.configuration.Configuration.DatabaseType;
@@@ -105,14 -105,6 +108,12 @@@ public abstract class AbstractUpgradeCa
    protected DBAccessor dbAccessor;
    @Inject
    protected Configuration configuration;
 +  @Inject
-   protected StackUpgradeUtil stackUpgradeUtil;
-   @Inject
 +  protected Provider<AgentConfigsHolder> m_agentConfigsHolder;
 +  @Inject
 +  protected Provider<MetadataHolder> m_metadataHolder;
 +  @Inject
 +  protected AmbariManagementControllerImpl ambariManagementController;
  
    protected Injector injector;
  

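The switch to injecting Provider<AgentConfigsHolder> and Provider<MetadataHolder> (rather
than the holders themselves) defers construction of those dependencies until get() is
called, which keeps upgrade-catalog construction cheap and avoids eagerly instantiating
components that may not be ready yet. A small sketch of the idiom, assuming a stand-in
Holder class in place of the real ones:

import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.Provider;

// Illustrative only; Holder stands in for AgentConfigsHolder/MetadataHolder.
class ProviderSketch {
  static class Holder { String state() { return "ready"; } }

  static class Catalog {
    @Inject private Provider<Holder> holderProvider;

    void run() {
      Holder holder = holderProvider.get(); // resolved only when actually needed
      System.out.println(holder.state());
    }
  }

  public static void main(String[] args) {
    Injector injector = Guice.createInjector();
    injector.getInstance(Catalog.class).run();
  }
}
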
http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
index 0000000,c31469e..d1de998
mode 000000,100644..100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
@@@ -1,0 -1,827 +1,498 @@@
 -/*
++/**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
 - *
 - *     http://www.apache.org/licenses/LICENSE-2.0
 - *
++ * <p>
++ * http://www.apache.org/licenses/LICENSE-2.0
++ * <p>
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.ambari.server.upgrade;
+ 
+ import static org.apache.ambari.server.view.ViewContextImpl.CORE_SITE;
+ 
+ import java.sql.SQLException;
+ import java.util.ArrayList;
 -import java.util.Arrays;
 -import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ 
+ import javax.persistence.EntityManager;
+ import javax.persistence.Query;
+ 
+ import org.apache.ambari.server.AmbariException;
 -import org.apache.ambari.server.controller.AmbariManagementController;
+ import org.apache.ambari.server.orm.DBAccessor;
 -import org.apache.ambari.server.orm.dao.ArtifactDAO;
 -import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 -import org.apache.ambari.server.orm.entities.ArtifactEntity;
+ import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 -import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+ import org.apache.ambari.server.state.Cluster;
+ import org.apache.ambari.server.state.Clusters;
+ import org.apache.ambari.server.state.Config;
 -import org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptor;
 -import org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptorContainer;
 -import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
 -import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 -import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
 -import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
 -import org.apache.ambari.server.state.kerberos.KerberosPrincipalDescriptor;
 -import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
+ import org.apache.commons.lang.StringUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.google.inject.Inject;
+ import com.google.inject.Injector;
+ 
+ /**
+  * The {@link org.apache.ambari.server.upgrade.UpgradeCatalog260} upgrades Ambari from 2.5.2 to 2.6.0.
+  */
+ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
+ 
+   public static final String CLUSTER_CONFIG_MAPPING_TABLE = "clusterconfigmapping";
+   public static final String CLUSTER_VERSION_TABLE = "cluster_version";
+   public static final String CLUSTER_ID_COLUMN = "cluster_id";
+   public static final String STATE_COLUMN = "state";
+   public static final String CREATE_TIMESTAMP_COLUMN = "create_timestamp";
+   public static final String VERSION_TAG_COLUMN = "version_tag";
+   public static final String TYPE_NAME_COLUMN = "type_name";
+ 
+   public static final String CLUSTER_CONFIG_TABLE = "clusterconfig";
+   public static final String SELECTED_COLUMN = "selected";
+   public static final String SERVICE_DELETED_COLUMN = "service_deleted";
+   public static final String UNMAPPED_COLUMN = "unmapped";
+   public static final String SELECTED_TIMESTAMP_COLUMN = "selected_timestamp";
+ 
+   public static final String SERVICE_COMPONENT_DESIRED_STATE_TABLE = "servicecomponentdesiredstate";
+   public static final String DESIRED_STACK_ID_COLUMN = "desired_stack_id";
+   public static final String DESIRED_VERSION_COLUMN = "desired_version";
+   public static final String DESIRED_REPO_VERSION_ID_COLUMN = "desired_repo_version_id";
+   public static final String REPO_STATE_COLUMN = "repo_state";
+   public static final String FK_SCDS_DESIRED_STACK_ID = "FK_scds_desired_stack_id";
+   public static final String FK_SCDS_DESIRED_REPO_ID = "FK_scds_desired_repo_id";
+ 
+   public static final String REPO_VERSION_TABLE = "repo_version";
+   public static final String REPO_VERSION_ID_COLUMN = "repo_version_id";
 -  public static final String REPO_VERSION_RESOLVED_COLUMN = "resolved";
+   public static final String REPO_VERSION_HIDDEN_COLUMN = "hidden";
+ 
+   public static final String HOST_COMPONENT_DESIRED_STATE_TABLE = "hostcomponentdesiredstate";
+   public static final String FK_HCDS_DESIRED_STACK_ID = "FK_hcds_desired_stack_id";
+ 
+   public static final String HOST_COMPONENT_STATE_TABLE = "hostcomponentstate";
+   public static final String CURRENT_STACK_ID_COLUMN = "current_stack_id";
+   public static final String FK_HCS_CURRENT_STACK_ID = "FK_hcs_current_stack_id";
+ 
+   public static final String HOST_VERSION_TABLE = "host_version";
+   public static final String UQ_HOST_REPO = "UQ_host_repo";
+   public static final String HOST_ID_COLUMN = "host_id";
+ 
+   public static final String SERVICE_DESIRED_STATE_TABLE = "servicedesiredstate";
+   public static final String FK_SDS_DESIRED_STACK_ID = "FK_sds_desired_stack_id";
+   public static final String FK_REPO_VERSION_ID = "FK_repo_version_id";
+ 
+   public static final String UPGRADE_TABLE = "upgrade";
+   public static final String UPGRADE_GROUP_TABLE = "upgrade_group";
+   public static final String UPGRADE_ITEM_TABLE = "upgrade_item";
+   public static final String FROM_REPO_VERSION_ID_COLUMN = "from_repo_version_id";
+   public static final String TO_REPO_VERSION_ID_COLUMN = "to_repo_version_id";
+   public static final String ORCHESTRATION_COLUMN = "orchestration";
+   public static final String ALLOW_REVERT_COLUMN = "revert_allowed";
+   public static final String FK_UPGRADE_FROM_REPO_ID = "FK_upgrade_from_repo_id";
+   public static final String FK_UPGRADE_TO_REPO_ID = "FK_upgrade_to_repo_id";
+   public static final String FK_UPGRADE_REPO_VERSION_ID = "FK_upgrade_repo_version_id";
 -  public static final String UPGRADE_ITEM_ITEM_TEXT = "item_text";
+ 
+   public static final String SERVICE_COMPONENT_HISTORY_TABLE = "servicecomponent_history";
+   public static final String UPGRADE_HISTORY_TABLE = "upgrade_history";
+   public static final String ID_COLUMN = "id";
+   public static final String UPGRADE_ID_COLUMN = "upgrade_id";
+   public static final String SERVICE_NAME_COLUMN = "service_name";
+   public static final String COMPONENT_NAME_COLUMN = "component_name";
+   public static final String TARGET_REPO_VERSION_ID_COLUMN = "target_repo_version_id";
+   public static final String PK_UPGRADE_HIST = "PK_upgrade_hist";
+   public static final String FK_UPGRADE_HIST_UPGRADE_ID = "FK_upgrade_hist_upgrade_id";
+   public static final String FK_UPGRADE_HIST_FROM_REPO = "FK_upgrade_hist_from_repo";
+   public static final String FK_UPGRADE_HIST_TARGET_REPO = "FK_upgrade_hist_target_repo";
+   public static final String UQ_UPGRADE_HIST = "UQ_upgrade_hist";
+   public static final String SERVICE_CONFIG_MAPPING_TABLE = "serviceconfigmapping";
+   public static final String SERVICE_COMPONENT_DESIRED_STATE = "servicecomponentdesiredstate";
+   public static final String HOST_COMPONENT_DESIRED_STATE = "hostcomponentdesiredstate";
+   public static final String HOST_COMPONENT_STATE = "hostcomponentstate";
+ 
 -  public static final String AMS_SSL_CLIENT = "ams-ssl-client";
 -  public static final String METRIC_TRUSTSTORE_ALIAS = "ssl.client.truststore.alias";
 -
+   /**
+    * Logger.
+    */
+   private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog260.class);
+   public static final String STANDARD = "STANDARD";
+   public static final String NOT_REQUIRED = "NOT_REQUIRED";
+   public static final String CURRENT = "CURRENT";
+   public static final String SELECTED = "1";
 -  public static final String VIEWURL_TABLE = "viewurl";
 -  public static final String PK_VIEWURL = "PK_viewurl";
 -  public static final String URL_ID_COLUMN = "url_id";
 -  public static final String STALE_POSTGRESS_VIEWURL_PKEY = "viewurl_pkey";
 -  public static final String USERS_TABLE = "users";
 -  public static final String STALE_POSTGRESS_USERS_LDAP_USER_KEY = "users_ldap_user_key";
+ 
+ 
+   /**
+    * Constructor.
+    *
+    * @param injector  the Guice injector used to obtain dependencies
+    */
+   @Inject
+   public UpgradeCatalog260(Injector injector) {
+     super(injector);
+   }
+ 
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   public String getSourceVersion() {
+     return "2.5.2";
+   }
+ 
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   public String getTargetVersion() {
+     return "2.6.0";
+   }
+ 
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   protected void executeDDLUpdates() throws AmbariException, SQLException {
+     int currentVersionID = getCurrentVersionID();
+     updateServiceComponentDesiredStateTable(currentVersionID);
+     updateServiceDesiredStateTable(currentVersionID);
+     addSelectedColumnsToClusterconfigTable();
+     updateHostComponentDesiredStateTable();
+     updateHostComponentStateTable();
+     dropStaleTables();
+     updateUpgradeTable();
+     createUpgradeHistoryTable();
+     updateRepositoryVersionTable();
+     renameServiceDeletedColumn();
 -    expandUpgradeItemItemTextColumn();
 -    addViewUrlPKConstraint();
 -    removeStaleConstraints();
 -  }
 -
 -
 -  /**
 -   * Updates {@value #VIEWURL_TABLE} table.
 -   * Adds the {@value #PK_VIEWURL} constraint.
 -   */
 -  private void addViewUrlPKConstraint() throws SQLException {
 -    dbAccessor.dropPKConstraint(VIEWURL_TABLE, STALE_POSTGRESS_VIEWURL_PKEY);
 -    dbAccessor.addPKConstraint(VIEWURL_TABLE, PK_VIEWURL, URL_ID_COLUMN);
 -  }
 -
 -  /**
 -   * remove stale unnamed constraints
 -   */
 -  private void removeStaleConstraints() throws SQLException {
 -    dbAccessor.dropUniqueConstraint(USERS_TABLE, STALE_POSTGRESS_USERS_LDAP_USER_KEY);
 -  }
 -
 -  /**
 -   * Expand item_text column of upgrade_item
 -   */
 -  private void expandUpgradeItemItemTextColumn() throws SQLException {
 -    dbAccessor.changeColumnType(UPGRADE_ITEM_TABLE, UPGRADE_ITEM_ITEM_TEXT,
 -      String.class, char[].class);
+   }
+ 
+   private void renameServiceDeletedColumn() throws AmbariException, SQLException {
+     if (dbAccessor.tableHasColumn(CLUSTER_CONFIG_TABLE, SERVICE_DELETED_COLUMN)) {
+       dbAccessor.renameColumn(CLUSTER_CONFIG_TABLE, SERVICE_DELETED_COLUMN, new DBAccessor.DBColumnInfo(UNMAPPED_COLUMN, Short.class, null, 0, false));
+     }
+   }
+ 
+   /*
+    * Searches for configs that are not linked to any service and sets
+    * "unmapped" to true. This is needed because when a config is not mapped
+    * and the "unmapped" flag is false, the db consistency check will warn
+    * about the unmapped config. (AMBARI-21795)
+    */
+   private void setUnmappedForOrphanedConfigs() {
+     executeInTransaction(new Runnable() {
+       @Override
+       public void run() {
+         EntityManager entityManager = getEntityManagerProvider().get();
+         Query query = entityManager.createNamedQuery("ClusterConfigEntity.findNotMappedClusterConfigsToService", ClusterConfigEntity.class);
+ 
+         List<ClusterConfigEntity> notMappedConfigs = query.getResultList();
+         if (notMappedConfigs != null) {
+           for (ClusterConfigEntity clusterConfigEntity : notMappedConfigs) {
+             clusterConfigEntity.setUnmapped(true);
+             entityManager.merge(clusterConfigEntity);
+           }
+         }
+       }
+     });
+   }
+ 
+   private void createUpgradeHistoryTable() throws SQLException {
+     List<DBAccessor.DBColumnInfo> columns = new ArrayList<>();
+ 
+     columns.add(new DBAccessor.DBColumnInfo(ID_COLUMN, Long.class, null, null, false));
+     columns.add(new DBAccessor.DBColumnInfo(UPGRADE_ID_COLUMN, Long.class, null, null, false));
+     columns.add(new DBAccessor.DBColumnInfo(SERVICE_NAME_COLUMN, String.class, 255, null, false));
+     columns.add(new DBAccessor.DBColumnInfo(COMPONENT_NAME_COLUMN, String.class, 255, null, false));
+     columns.add(new DBAccessor.DBColumnInfo(FROM_REPO_VERSION_ID_COLUMN, Long.class, null, null, false));
+     columns.add(new DBAccessor.DBColumnInfo(TARGET_REPO_VERSION_ID_COLUMN, Long.class, null, null, false));
+     dbAccessor.createTable(UPGRADE_HISTORY_TABLE, columns);
+ 
+     dbAccessor.addPKConstraint(UPGRADE_HISTORY_TABLE, PK_UPGRADE_HIST, ID_COLUMN);
+ 
+     dbAccessor.addFKConstraint(UPGRADE_HISTORY_TABLE, FK_UPGRADE_HIST_UPGRADE_ID, UPGRADE_ID_COLUMN, UPGRADE_TABLE, UPGRADE_ID_COLUMN, false);
+     dbAccessor.addFKConstraint(UPGRADE_HISTORY_TABLE, FK_UPGRADE_HIST_FROM_REPO, FROM_REPO_VERSION_ID_COLUMN, REPO_VERSION_TABLE, REPO_VERSION_ID_COLUMN, false);
+     dbAccessor.addFKConstraint(UPGRADE_HISTORY_TABLE, FK_UPGRADE_HIST_TARGET_REPO, TARGET_REPO_VERSION_ID_COLUMN, REPO_VERSION_TABLE, REPO_VERSION_ID_COLUMN, false);
+     dbAccessor.addUniqueConstraint(UPGRADE_HISTORY_TABLE, UQ_UPGRADE_HIST, UPGRADE_ID_COLUMN, COMPONENT_NAME_COLUMN, SERVICE_NAME_COLUMN);
+ 
+     addSequence("upgrade_history_id_seq", 0L, false);
+   }
+ 
+   /**
+    * Updates the {@value #UPGRADE_TABLE} table:
+    * Clears the {@value #UPGRADE_TABLE}, {@value #UPGRADE_GROUP_TABLE} and
+    * {@value #UPGRADE_ITEM_TABLE} tables.
+    * Removes the {@value #FROM_REPO_VERSION_ID_COLUMN} and {@value #TO_REPO_VERSION_ID_COLUMN} columns.
+    * Adds the {@value #ORCHESTRATION_COLUMN}, {@value #REPO_VERSION_ID_COLUMN} and
+    * {@value #ALLOW_REVERT_COLUMN} columns.
+    * Removes the {@value #FK_UPGRADE_FROM_REPO_ID} and {@value #FK_UPGRADE_TO_REPO_ID} foreign keys.
+    * Adds the {@value #FK_UPGRADE_REPO_VERSION_ID} foreign key.
+    *
+    * @throws java.sql.SQLException
+    */
+   private void updateUpgradeTable() throws SQLException {
+     dbAccessor.clearTable(UPGRADE_ITEM_TABLE);
+     dbAccessor.clearTable(UPGRADE_GROUP_TABLE);
+     dbAccessor.clearTable(UPGRADE_TABLE);
+     dbAccessor.dropFKConstraint(UPGRADE_TABLE, FK_UPGRADE_FROM_REPO_ID);
+     dbAccessor.dropFKConstraint(UPGRADE_TABLE, FK_UPGRADE_TO_REPO_ID);
+     dbAccessor.dropColumn(UPGRADE_TABLE, FROM_REPO_VERSION_ID_COLUMN);
+     dbAccessor.dropColumn(UPGRADE_TABLE, TO_REPO_VERSION_ID_COLUMN);
+ 
+     dbAccessor.addColumn(UPGRADE_TABLE,
+         new DBAccessor.DBColumnInfo(REPO_VERSION_ID_COLUMN, Long.class, null, null, false));
+ 
+     dbAccessor.addColumn(UPGRADE_TABLE,
+         new DBAccessor.DBColumnInfo(ORCHESTRATION_COLUMN, String.class, 255, STANDARD, false));
+ 
+     dbAccessor.addColumn(UPGRADE_TABLE,
+         new DBAccessor.DBColumnInfo(ALLOW_REVERT_COLUMN, Short.class, null, 0, false));
+ 
+     dbAccessor.addFKConstraint(UPGRADE_TABLE, FK_UPGRADE_REPO_VERSION_ID, REPO_VERSION_ID_COLUMN, REPO_VERSION_TABLE, REPO_VERSION_ID_COLUMN, false);
+   }
+ 
+   /**
+    * Updates {@value #SERVICE_DESIRED_STATE_TABLE} table.
+    * Removes {@value #DESIRED_STACK_ID_COLUMN} column.
+    * Adds the {@value #DESIRED_REPO_VERSION_ID_COLUMN} column.
+    * Removes {@value #FK_SDS_DESIRED_STACK_ID} foreign key.
+    * Adds the {@value #FK_REPO_VERSION_ID} foreign key.
+    *
+    * @throws java.sql.SQLException
+    */
+   private void updateServiceDesiredStateTable(int currentRepoID) throws SQLException {
+ 
+     dbAccessor.addColumn(SERVICE_DESIRED_STATE_TABLE,
+         new DBAccessor.DBColumnInfo(DESIRED_REPO_VERSION_ID_COLUMN, Long.class, null, currentRepoID, false));
+     dbAccessor.alterColumn(SERVICE_DESIRED_STATE_TABLE,
+         new DBAccessor.DBColumnInfo(DESIRED_REPO_VERSION_ID_COLUMN, Long.class, null, null, false));
+ 
+     dbAccessor.addFKConstraint(SERVICE_DESIRED_STATE_TABLE, FK_REPO_VERSION_ID, DESIRED_REPO_VERSION_ID_COLUMN, REPO_VERSION_TABLE, REPO_VERSION_ID_COLUMN, false);
+     dbAccessor.dropFKConstraint(SERVICE_DESIRED_STATE_TABLE, FK_SDS_DESIRED_STACK_ID);
+     dbAccessor.dropColumn(SERVICE_DESIRED_STATE_TABLE, DESIRED_STACK_ID_COLUMN);
+   }
+ 
+   /**
+    * Drops the {@value #CLUSTER_CONFIG_MAPPING_TABLE}, {@value #CLUSTER_VERSION_TABLE} and
+    * {@value #SERVICE_COMPONENT_HISTORY_TABLE} tables.
+    *
+    * @throws java.sql.SQLException
+    */
+   private void dropStaleTables() throws SQLException {
+     dbAccessor.dropTable(CLUSTER_CONFIG_MAPPING_TABLE);
+     dbAccessor.dropTable(CLUSTER_VERSION_TABLE);
+     dbAccessor.dropTable(SERVICE_COMPONENT_HISTORY_TABLE);
+   }
+ 
+   /**
+    * Adds the {@value #SELECTED_COLUMN} and {@value #SELECTED_TIMESTAMP_COLUMN} columns to the
+    * {@value #CLUSTER_CONFIG_TABLE} table.
+    *
+    * @throws java.sql.SQLException
+    */
+   private void addSelectedColumnsToClusterconfigTable() throws SQLException {
+     DBAccessor.DBColumnInfo selectedColumnInfo = new DBAccessor.DBColumnInfo(SELECTED_COLUMN, Short.class, null, 0, false);
+     DBAccessor.DBColumnInfo selectedmappingColumnInfo = new DBAccessor.DBColumnInfo(SELECTED_COLUMN, Integer.class, null, 0, false);
+     DBAccessor.DBColumnInfo selectedTimestampColumnInfo = new DBAccessor.DBColumnInfo(SELECTED_TIMESTAMP_COLUMN, Long.class, null, 0, false);
+     DBAccessor.DBColumnInfo createTimestampColumnInfo = new DBAccessor.DBColumnInfo(CREATE_TIMESTAMP_COLUMN, Long.class, null, null, false);
+     dbAccessor.copyColumnToAnotherTable(CLUSTER_CONFIG_MAPPING_TABLE, selectedmappingColumnInfo,
+         CLUSTER_ID_COLUMN, TYPE_NAME_COLUMN, VERSION_TAG_COLUMN, CLUSTER_CONFIG_TABLE, selectedColumnInfo,
+         CLUSTER_ID_COLUMN, TYPE_NAME_COLUMN, VERSION_TAG_COLUMN, SELECTED_COLUMN, SELECTED, 0);
+ 
+     dbAccessor.copyColumnToAnotherTable(CLUSTER_CONFIG_MAPPING_TABLE, createTimestampColumnInfo,
+         CLUSTER_ID_COLUMN, TYPE_NAME_COLUMN, VERSION_TAG_COLUMN, CLUSTER_CONFIG_TABLE, selectedTimestampColumnInfo,
+         CLUSTER_ID_COLUMN, TYPE_NAME_COLUMN, VERSION_TAG_COLUMN, SELECTED_COLUMN, SELECTED, 0);
+   }
+ 
+ 
+   /**
+    * Updates {@value #SERVICE_COMPONENT_DESIRED_STATE_TABLE} table.
+    * Removes the {@value #DESIRED_VERSION_COLUMN} and {@value #DESIRED_STACK_ID_COLUMN} columns.
+    * Adds the {@value #DESIRED_REPO_VERSION_ID_COLUMN} and {@value #REPO_STATE_COLUMN} columns.
+    * Removes the {@value #FK_SCDS_DESIRED_STACK_ID} foreign key.
+    * Adds the {@value #FK_SCDS_DESIRED_REPO_ID} foreign key.
+    *
+    * @throws java.sql.SQLException
+    */
+   private void updateServiceComponentDesiredStateTable(int currentRepoID) throws SQLException {
+     dbAccessor.addColumn(SERVICE_COMPONENT_DESIRED_STATE_TABLE,
+         new DBAccessor.DBColumnInfo(DESIRED_REPO_VERSION_ID_COLUMN, Long.class, null, currentRepoID, false));
+     dbAccessor.alterColumn(SERVICE_COMPONENT_DESIRED_STATE_TABLE,
+         new DBAccessor.DBColumnInfo(DESIRED_REPO_VERSION_ID_COLUMN, Long.class, null, null, false));
+ 
+     dbAccessor.addColumn(SERVICE_COMPONENT_DESIRED_STATE_TABLE,
+         new DBAccessor.DBColumnInfo(REPO_STATE_COLUMN, String.class, 255, CURRENT, false));
+     dbAccessor.alterColumn(SERVICE_COMPONENT_DESIRED_STATE_TABLE,
+         new DBAccessor.DBColumnInfo(REPO_STATE_COLUMN, String.class, 255, NOT_REQUIRED, false));
+ 
+     dbAccessor.addFKConstraint(SERVICE_COMPONENT_DESIRED_STATE_TABLE, FK_SCDS_DESIRED_REPO_ID, DESIRED_REPO_VERSION_ID_COLUMN, REPO_VERSION_TABLE, REPO_VERSION_ID_COLUMN, false);
+ 
+     dbAccessor.dropFKConstraint(SERVICE_COMPONENT_DESIRED_STATE_TABLE, FK_SCDS_DESIRED_STACK_ID);
+     dbAccessor.dropColumn(SERVICE_COMPONENT_DESIRED_STATE_TABLE, DESIRED_STACK_ID_COLUMN);
+     dbAccessor.dropColumn(SERVICE_COMPONENT_DESIRED_STATE_TABLE, DESIRED_VERSION_COLUMN);
+   }
+ 
+   /**
+    * Updates {@value #HOST_COMPONENT_DESIRED_STATE_TABLE} table.
+    * Removes {@value #DESIRED_STACK_ID_COLUMN} column.
+    * Removes {@value #FK_HCDS_DESIRED_STACK_ID} foreign key.
+    *
+    * @throws java.sql.SQLException
+    */
+   private void updateHostComponentDesiredStateTable() throws SQLException {
+     dbAccessor.dropFKConstraint(HOST_COMPONENT_DESIRED_STATE_TABLE, FK_HCDS_DESIRED_STACK_ID);
+     dbAccessor.dropColumn(HOST_COMPONENT_DESIRED_STATE_TABLE, DESIRED_STACK_ID_COLUMN);
+   }
+ 
+   /**
+    * Updates {@value #HOST_COMPONENT_STATE_TABLE} table.
+    * Removes {@value #CURRENT_STACK_ID_COLUMN} column.
+    * Removes {@value #FK_HCS_CURRENT_STACK_ID} foreign key.
+    *
+    * @throws java.sql.SQLException
+    */
+   private void updateHostComponentStateTable() throws SQLException {
+     dbAccessor.dropFKConstraint(HOST_COMPONENT_STATE_TABLE, FK_HCS_CURRENT_STACK_ID);
+     dbAccessor.dropColumn(HOST_COMPONENT_STATE_TABLE, CURRENT_STACK_ID_COLUMN);
+   }
+ 
+   /**
 -   * Updates {@value #REPO_VERSION_TABLE} table. Adds the following columns:
 -   * <ul>
 -   * <li>{@value #REPO_VERSION_HIDDEN_COLUMN}
 -   * <li>{@value #REPO_VERSION_RESOLVED_COLUMN}
 -   * </ul>
++   * Updates {@value #REPO_VERSION_TABLE} table. Adds
++   * {@value #REPO_VERSION_HIDDEN_COLUMN} column.
+    *
+    * @throws java.sql.SQLException
+    */
+   private void updateRepositoryVersionTable() throws SQLException {
+     dbAccessor.addColumn(REPO_VERSION_TABLE,
+         new DBAccessor.DBColumnInfo(REPO_VERSION_HIDDEN_COLUMN, Short.class, null, 0, false));
 -
 -    dbAccessor.addColumn(REPO_VERSION_TABLE,
 -        new DBAccessor.DBColumnInfo(REPO_VERSION_RESOLVED_COLUMN, Short.class, null, 0, false));
+   }
+ 
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   protected void executePreDMLUpdates() throws AmbariException, SQLException {
 -    removeSupersetFromDruid();
++
+   }
+ 
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   protected void executeDMLUpdates() throws AmbariException, SQLException {
+     addNewConfigurationsFromXml();
+     setUnmappedForOrphanedConfigs();
++    removeSupersetFromDruid();
+     ensureZeppelinProxyUserConfigs();
 -    updateKerberosDescriptorArtifacts();
 -    updateAmsConfigs();
 -    updateHDFSWidgetDefinition();
 -    updateExistingRepositoriesToBeResolved();
+   }
+ 
+   public int getCurrentVersionID() throws AmbariException, SQLException {
+     List<Integer> currentVersionList = dbAccessor.getIntColumnValues(CLUSTER_VERSION_TABLE, REPO_VERSION_ID_COLUMN,
+         new String[]{STATE_COLUMN}, new String[]{CURRENT}, false);
+     if (currentVersionList.size() != 1) {
+       throw new AmbariException("Can't get current version id");
+     }
+     return currentVersionList.get(0);
+   }
+ 
+   // Superset was moved to an independent service in Ambari 2.6.
+   // This removes the Superset component if it was installed under the Druid service.
+   protected void removeSupersetFromDruid() throws SQLException {
+     removeComponent("DRUID_SUPERSET", "druid-superset");
+   }
+ 
+   private void removeComponent(String componentName, String configPrefix) throws SQLException {
+ 
+     String serviceConfigMappingRemoveSQL = String.format(
+         "DELETE FROM %s WHERE config_id IN (SELECT config_id from %s where type_name like '%s%%')",
+         SERVICE_CONFIG_MAPPING_TABLE, CLUSTER_CONFIG_TABLE, configPrefix);
+ 
+     String supersetConfigRemoveSQL = String.format(
+         "DELETE FROM %s WHERE type_name like '%s%%'",
+         CLUSTER_CONFIG_TABLE, configPrefix);
+ 
+     String hostComponentDesiredStateRemoveSQL = String.format(
+         "DELETE FROM %s WHERE component_name = '%s'",
+         HOST_COMPONENT_DESIRED_STATE, componentName);
+ 
+     String hostComponentStateRemoveSQL = String.format(
+         "DELETE FROM %s WHERE component_name = '%s'",
+         HOST_COMPONENT_STATE, componentName);
+ 
+     String serviceComponentDesiredStateRemoveSQL = String.format(
+         "DELETE FROM %s WHERE component_name = '%s'",
+         SERVICE_COMPONENT_DESIRED_STATE, componentName);
+ 
+     dbAccessor.executeQuery(serviceConfigMappingRemoveSQL);
+     dbAccessor.executeQuery(supersetConfigRemoveSQL);
+     dbAccessor.executeQuery(hostComponentDesiredStateRemoveSQL);
+     dbAccessor.executeQuery(hostComponentStateRemoveSQL);
+     dbAccessor.executeQuery(serviceComponentDesiredStateRemoveSQL);
+   }
+ 
+   /**
+    * If Zeppelin is installed, ensure that the proxyuser configurations are set in <code>core-site</code>.
+    * <p>
+    * The following configurations will be added, if core-site exists and the properties are not in the
+    * set of core-site properties:
+    * <ul>
+    * <li><code>"hadoop.proxyuser.{zeppelin-env/zeppelin_user}.groups": "*"</code></li>
+    * <li><code>"hadoop.proxyuser.{zeppelin-env/zeppelin_user}.hosts": "*"</code></li>
+    * </ul>
+    */
+   void ensureZeppelinProxyUserConfigs() throws AmbariException {
+     Clusters clusters = injector.getInstance(Clusters.class);
+     Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
+ 
+     if ((clusterMap != null) && !clusterMap.isEmpty()) {
+       for (final Cluster cluster : clusterMap.values()) {
+         Config zeppelinEnvConfig = cluster.getDesiredConfigByType("zeppelin-env");
+ 
+         if (zeppelinEnvConfig != null) {
+           // If zeppelin-env exists, then it is assumed that Zeppelin is installed
+           Map<String, String> zeppelinEnvProperties = zeppelinEnvConfig.getProperties();
+ 
+           String zeppelinUser = null;
+           if (zeppelinEnvProperties != null) {
+             zeppelinUser = zeppelinEnvProperties.get("zeppelin_user");
+           }
+ 
+           if (!StringUtils.isEmpty(zeppelinUser)) {
+             // If the zeppelin user is set, see if the proxyuser configs need to be set
+ 
+             Config coreSiteConfig = cluster.getDesiredConfigByType(CORE_SITE);
+             if (coreSiteConfig != null) {
+               // If core-site exists, ensure the proxyuser configurations for Zeppelin are set.
+               // If they are not already set, set them to their default value.
+               String proxyUserHostsName = String.format("hadoop.proxyuser.%s.hosts", zeppelinUser);
+               String proxyUserGroupsName = String.format("hadoop.proxyuser.%s.groups", zeppelinUser);
+ 
+               Map<String, String> proxyUserProperties = new HashMap<>();
+               proxyUserProperties.put(proxyUserHostsName, "*");
+               proxyUserProperties.put(proxyUserGroupsName, "*");
+ 
+               Map<String, String> coreSiteConfigProperties = coreSiteConfig.getProperties();
+ 
+               if (coreSiteConfigProperties != null) {
+                 if (coreSiteConfigProperties.containsKey(proxyUserHostsName)) {
+                   proxyUserProperties.remove(proxyUserHostsName);
+                 }
+ 
+                 if (coreSiteConfigProperties.containsKey(proxyUserGroupsName)) {
+                   proxyUserProperties.remove(proxyUserGroupsName);
+                 }
+               }
+ 
+               if (!proxyUserProperties.isEmpty()) {
+                 updateConfigurationPropertiesForCluster(cluster, CORE_SITE, proxyUserProperties, true, false);
+               }
+             }
+           }
+         }
+       }
+     }
+   }
 -
 -  /**
 -   * {@inheritDoc}
 -   */
 -  @Override
 -  protected void updateKerberosDescriptorArtifact(ArtifactDAO artifactDAO, ArtifactEntity artifactEntity) throws AmbariException {
 -    if (artifactEntity != null) {
 -      Map<String, Object> data = artifactEntity.getArtifactData();
 -      if (data != null) {
 -        final KerberosDescriptor kerberosDescriptor = new KerberosDescriptorFactory().createInstance(data);
 -        if (kerberosDescriptor != null) {
 -          fixRangerKMSKerberosDescriptor(kerberosDescriptor);
 -          fixIdentityReferences(getCluster(artifactEntity), kerberosDescriptor);
 -
 -          artifactEntity.setArtifactData(kerberosDescriptor.toMap());
 -          artifactDAO.merge(artifactEntity);
 -        }
 -      }
 -    }
 -  }
 -
 -  protected void fixRangerKMSKerberosDescriptor(KerberosDescriptor kerberosDescriptor) {
 -    KerberosServiceDescriptor rangerKmsServiceDescriptor = kerberosDescriptor.getService("RANGER_KMS");
 -    if (rangerKmsServiceDescriptor != null) {
 -
 -      KerberosIdentityDescriptor rangerKmsServiceIdentity = rangerKmsServiceDescriptor.getIdentity("/smokeuser");
 -      if (rangerKmsServiceIdentity != null) {
 -        rangerKmsServiceDescriptor.removeIdentity("/smokeuser");
 -      }
 -      KerberosComponentDescriptor rangerKmscomponentDescriptor = rangerKmsServiceDescriptor.getComponent("RANGER_KMS_SERVER");
 -      if (rangerKmscomponentDescriptor != null) {
 -        KerberosIdentityDescriptor rangerKmsComponentIdentity = rangerKmscomponentDescriptor.getIdentity("/smokeuser");
 -        if (rangerKmsComponentIdentity != null) {
 -          rangerKmscomponentDescriptor.removeIdentity("/smokeuser");
 -        }
 -      }
 -    }
 -  }
 -
 -  protected void updateAmsConfigs() throws AmbariException {
 -    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
 -    Clusters clusters = ambariManagementController.getClusters();
 -    if (clusters != null) {
 -      Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
 -      if (clusterMap != null && !clusterMap.isEmpty()) {
 -        for (final Cluster cluster : clusterMap.values()) {
 -
 -
 -          Config amsSslClient = cluster.getDesiredConfigByType(AMS_SSL_CLIENT);
 -          if (amsSslClient != null) {
 -            Map<String, String> amsSslClientProperties = amsSslClient.getProperties();
 -
 -            if (amsSslClientProperties.containsKey(METRIC_TRUSTSTORE_ALIAS)) {
 -              LOG.info("Removing " + METRIC_TRUSTSTORE_ALIAS + " from " + AMS_SSL_CLIENT);
 -              removeConfigurationPropertiesFromCluster(cluster, AMS_SSL_CLIENT, Collections.singleton(METRIC_TRUSTSTORE_ALIAS));
 -            }
 -
 -          }
 -        }
 -      }
 -    }
 -  }
 -
 -  protected void updateHDFSWidgetDefinition() throws AmbariException {
 -    LOG.info("Updating HDFS widget definition.");
 -
 -    Map<String, List<String>> widgetMap = new HashMap<>();
 -    Map<String, String> sectionLayoutMap = new HashMap<>();
 -
 -    List<String> hdfsHeatmapWidgets = new ArrayList<>(Arrays.asList("HDFS Bytes Read", "HDFS Bytes Written",
 -      "DataNode Process Disk I/O Utilization", "DataNode Process Network I/O Utilization"));
 -    widgetMap.put("HDFS_HEATMAPS", hdfsHeatmapWidgets);
 -    sectionLayoutMap.put("HDFS_HEATMAPS", "default_hdfs_heatmap");
 -
 -    updateWidgetDefinitionsForService("HDFS", widgetMap, sectionLayoutMap);
 -  }
 -
 -  /**
 -   * Retrieves the relevant {@link Cluster} given information from the supplied {@link ArtifactEntity}.
 -   * <p>
 -   * The cluster id value is taken from the entity's foreign key value and then used to obtain the cluster object.
 -   *
 -   * @param artifactEntity an {@link ArtifactEntity}
 -   * @return a {@link Cluster}
 -   */
 -  private Cluster getCluster(ArtifactEntity artifactEntity) {
 -    if (artifactEntity != null) {
 -      Map<String, String> keys = artifactEntity.getForeignKeys();
 -      if (keys != null) {
 -        String clusterId = keys.get("cluster");
 -        if (StringUtils.isNumeric(clusterId)) {
 -          Clusters clusters = injector.getInstance(Clusters.class);
 -          try {
 -            return clusters.getCluster(Long.valueOf(clusterId));
 -          } catch (AmbariException e) {
 -            LOG.error(String.format("Failed to obtain cluster using cluster id %s -  %s", clusterId, e.getMessage()), e);
 -          }
 -        } else {
 -          LOG.error(String.format("Failed to obtain cluster id from artifact entity with foreign keys: %s", keys));
 -        }
 -      }
 -    }
 -
 -    return null;
 -  }
 -
 -  /**
 -   * Recursively traverses the Kerberos descriptor to find and fix the identity references.
 -   * <p>
 -   * Each found identity descriptor that indicates it is a reference by having a <code>name</code>
 -   * value that starts with a "/" or a "./" is fixed by clearing the <code>principal name</code>value,
 -   * setting the <code>reference</code> value to the <code>name</code> value and changing the
 -   * <code>name</code> value to a name with the following pattern:
 -   * <code>SERVICE_COMPONENT_IDENTITY</code>
 -   * <p>
 -   * For example, if the identity is for the "SERVICE1" service and is a reference to "HDFS/NAMENODE/hdfs";
 -   * then the name is set to "<code>service1_hdfs</code>"
 -   * <p>
 -   * For example, if the identity is for the "COMPONENT21" component of the "SERVICE2" service and is a reference to "HDFS/NAMENODE/hdfs";
 -   * then the name is set to "<code>service2_component21_hdfs</code>"
 -   * <p>
 -   * Once the identity descriptor properties of the identity are fixed, the relevant configuration
 -   * value is fixed to match the value of the referenced identity. This may lead to a new version
 -   * of the relevant configuration type.
 -   *
 -   * @param cluster   the cluster
 -   * @param container the current Kerberos descriptor container
 -   * @throws AmbariException if an error occurs
 -   */
 -  private void fixIdentityReferences(Cluster cluster, AbstractKerberosDescriptorContainer container)
 -      throws AmbariException {
 -    List<KerberosIdentityDescriptor> identities = container.getIdentities();
 -    if (identities != null) {
 -      for (KerberosIdentityDescriptor identity : identities) {
 -        String name = identity.getName();
 -
 -        if (!StringUtils.isEmpty(name) && (name.startsWith("/") || name.startsWith("./"))) {
 -          String[] parts = name.split("/");
 -          String newName = buildName(identity.getParent(), parts[parts.length - 1]);
 -
 -          identity.setName(newName);
 -          identity.setReference(name);
 -        }
 -
 -        String identityReference = identity.getReference();
 -        if (!StringUtils.isEmpty(identityReference)) {
 -          // If this identity references another identity:
 -          //  * The principal name needs to be the same as the referenced identity
 -          //    - ensure that no principal name is being set for this identity
 -          //  * Any configuration set to contain the reference principal name needs to be fixed to
 -          //    be the correct principal name
 -          KerberosPrincipalDescriptor principal = identity.getPrincipalDescriptor();
 -          if (principal != null) {
 -            // Fix the value
 -            principal.setValue(null);
 -
 -            // Fix the relative configuration
 -            if (!StringUtils.isEmpty(principal.getConfiguration())) {
 -              String referencedPrincipalName = getConfiguredPrincipalNameFromReference(cluster, container, identityReference);
 -
 -              if(!StringUtils.isEmpty(referencedPrincipalName)) {
 -                String[] parts = principal.getConfiguration().split("/");
 -                if (parts.length == 2) {
 -                  String type = parts[0];
 -                  String property = parts[1];
 -
 -                  updateConfigurationPropertiesForCluster(cluster,
 -                      type,
 -                      Collections.singletonMap(property, referencedPrincipalName),
 -                      true,
 -                      false);
 -                }
 -              }
 -            }
 -          }
 -        }
 -      }
 -    }
 -
 -    if (container instanceof KerberosDescriptor) {
 -      Map<String, KerberosServiceDescriptor> services = ((KerberosDescriptor) container).getServices();
 -      if (services != null) {
 -        for (KerberosServiceDescriptor serviceDescriptor : services.values()) {
 -          fixIdentityReferences(cluster, serviceDescriptor);
 -        }
 -      }
 -    } else if (container instanceof KerberosServiceDescriptor) {
 -      Map<String, KerberosComponentDescriptor> components = ((KerberosServiceDescriptor) container).getComponents();
 -      if (components != null) {
 -        for (KerberosComponentDescriptor componentDescriptor : components.values()) {
 -          fixIdentityReferences(cluster, componentDescriptor);
 -        }
 -      }
 -    }
 -  }
 -
 -  /**
 -   * Finds the value of the configuration found for the principal in the referenced identity
 -   * descriptor.
 -   *
 -   * @param cluster           the cluster
 -   * @param container         the current {@link KerberosIdentityDescriptor}, ideally the identity's parent descriptor
 -   * @param identityReference the path to the referenced identity
 -   * @return the value of the configuration specified in the referenced identity's principal descriptor
 -   * @throws AmbariException if an error occurs
 -   */
 -  private String getConfiguredPrincipalNameFromReference(Cluster cluster,
 -                                                         AbstractKerberosDescriptorContainer container,
 -                                                         String identityReference)
 -      throws AmbariException {
 -    KerberosIdentityDescriptor identityDescriptor = container.getReferencedIdentityDescriptor(identityReference);
 -
 -    if (identityDescriptor != null) {
 -      KerberosPrincipalDescriptor principal = identityDescriptor.getPrincipalDescriptor();
 -      if ((principal != null) && (!StringUtils.isEmpty(principal.getConfiguration()))) {
 -        String[] parts = principal.getConfiguration().split("/");
 -        if (parts.length == 2) {
 -          String type = parts[0];
 -          String property = parts[1];
 -
 -          Config config = cluster.getDesiredConfigByType(type);
 -
 -          if (config != null) {
 -            return config.getProperties().get(property);
 -          }
 -        }
 -      }
 -    }
 -
 -    return null;
 -  }
 -
 -  /**
 -   * Builds the name of an identity based on the identity's container and the referenced identity's name.
 -   * <p>
 -   * The calculated name will be in the following format and converted to all lowercase characters:
 -   * <code>SERVICE_COMPONENT_IDENTITY</code>
 -   *
 -   * @param container    the current {@link KerberosIdentityDescriptor}, ideally the identity's parent descriptor
 -   * @param identityName the referenced identity's name
 -   * @return a name
 -   */
 -  private String buildName(AbstractKerberosDescriptor container, String identityName) {
 -    if (container instanceof KerberosServiceDescriptor) {
 -      return container.getName().toLowerCase() + "_" + identityName;
 -    } else if (container instanceof KerberosComponentDescriptor) {
 -      return container.getParent().getName().toLowerCase() + "_" + container.getName().toLowerCase() + "_" + identityName;
 -    } else {
 -      return identityName;
 -    }
 -  }
 -
 -  /**
 -   * Sets all existing repository versions to be resolved (we have to assume
 -   * that they are good since they've been using them to run stuff).
 -   *
 -   * @throws AmbariException
 -   */
 -  protected void updateExistingRepositoriesToBeResolved() throws AmbariException {
 -    RepositoryVersionDAO repositoryVersionDAO = injector.getInstance(RepositoryVersionDAO.class);
 -    List<RepositoryVersionEntity> repositoryVersions = repositoryVersionDAO.findAll();
 -    for (RepositoryVersionEntity repositoryVersion : repositoryVersions) {
 -      repositoryVersion.setResolved(true);
 -      repositoryVersionDAO.merge(repositoryVersion);
 -    }
 -  }
+ }

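A note on the DDL idiom used in updateServiceDesiredStateTable() and
updateServiceComponentDesiredStateTable() above: a NOT NULL column cannot be added to a
table with existing rows unless a value is supplied, so the catalog first adds the column
with a default (backfilling every row with the current repo version id) and then alters
it to remove the default. A plain-JDBC sketch of the same two-step pattern (the literal
default value and the ALTER syntax here are illustrative and dialect-dependent):

import java.sql.Connection;
import java.sql.Statement;

// Illustrative only; DBAccessor handles the dialect differences in Ambari.
final class BackfillSketch {
  static void addNotNullColumn(Connection conn) throws Exception {
    try (Statement st = conn.createStatement()) {
      // Step 1: the default backfills every existing row.
      st.execute("ALTER TABLE servicedesiredstate "
          + "ADD COLUMN desired_repo_version_id BIGINT DEFAULT 1 NOT NULL");
      // Step 2: drop the default so new rows must set the value explicitly.
      st.execute("ALTER TABLE servicedesiredstate "
          + "ALTER COLUMN desired_repo_version_id DROP DEFAULT");
    }
  }
}
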
http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
index 633d837,bfe2a13..f4a2a78
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
@@@ -294,16 -244,103 +244,103 @@@ public class UpgradeCatalog300 extends 
            Collection<Config> configs = cluster.getAllConfigs();
            for (Config config : configs) {
              String configType = config.getType();
-             if (!configType.endsWith("-logsearch-conf")) {
-               continue;
+             if (configType.endsWith("-logsearch-conf")) {
+               configHelper.removeConfigsByType(cluster, configType);
+             }
+           }
+ 
+           Config logSearchEnv = cluster.getDesiredConfigByType("logsearch-env");
+ 
+           String oldProtocolProperty = null;
+           String oldPortProperty = null;
+           if (logSearchEnv != null) {
+             oldProtocolProperty = logSearchEnv.getProperties().get("logsearch_ui_protocol");
+             oldPortProperty = logSearchEnv.getProperties().get("logsearch_ui_port");
+           }
+ 
+           Config logSearchProperties = cluster.getDesiredConfigByType("logsearch-properties");
+           Config logFeederProperties = cluster.getDesiredConfigByType("logfeeder-properties");
+           if (logSearchProperties != null && logFeederProperties != null) {
+             configHelper.createConfigType(cluster, cluster.getDesiredStackVersion(), ambariManagementController,
+                 "logsearch-common-properties", Collections.emptyMap(), "ambari-upgrade",
+                 String.format("Updated logsearch-common-properties during Ambari Upgrade from %s to %s",
+                     getSourceVersion(), getTargetVersion()));
 -            
++
+             String defaultLogLevels = logSearchProperties.getProperties().get("logsearch.logfeeder.include.default.level");
+ 
+             Set<String> removeProperties = Sets.newHashSet("logsearch.logfeeder.include.default.level");
+             removeConfigurationPropertiesFromCluster(cluster, "logsearch-properties", removeProperties);
+ 
+             Map<String, String> newLogSearchProperties = new HashMap<>();
+             if (oldProtocolProperty != null) {
+               newLogSearchProperties.put("logsearch.protocol", oldProtocolProperty);
+             }
+ 
+             if (oldPortProperty != null) {
+               newLogSearchProperties.put("logsearch.http.port", oldPortProperty);
+               newLogSearchProperties.put("logsearch.https.port", oldPortProperty);
+             }
+             if (!newLogSearchProperties.isEmpty()) {
+               updateConfigurationPropertiesForCluster(cluster, "logsearch-properties", newLogSearchProperties, true, true);
+             }
+ 
+             Map<String, String> newLogfeederProperties = new HashMap<>();
+             newLogfeederProperties.put("logfeeder.include.default.level", defaultLogLevels);
+             updateConfigurationPropertiesForCluster(cluster, "logfeeder-properties", newLogfeederProperties, true, true);
+           }
+ 
+           Config logFeederLog4jProperties = cluster.getDesiredConfigByType("logfeeder-log4j");
+           if (logFeederLog4jProperties != null) {
+             String content = logFeederLog4jProperties.getProperties().get("content");
+             if (content.contains("<!DOCTYPE log4j:configuration SYSTEM \"log4j.dtd\">")) {
+               content = content.replace("<!DOCTYPE log4j:configuration SYSTEM \"log4j.dtd\">", "<!DOCTYPE log4j:configuration SYSTEM \"http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/xml/doc-files/log4j.dtd\">");
+               updateConfigurationPropertiesForCluster(cluster, "logfeeder-log4j", Collections.singletonMap("content", content), true, true);
+             }
+           }
+ 
+           Config logSearchLog4jProperties = cluster.getDesiredConfigByType("logsearch-log4j");
+           if (logSearchLog4jProperties != null) {
+             String content = logSearchLog4jProperties.getProperties().get("content");
+             if (content.contains("<!DOCTYPE log4j:configuration SYSTEM \"log4j.dtd\">")) {
+               content = content.replace("<!DOCTYPE log4j:configuration SYSTEM \"log4j.dtd\">", "<!DOCTYPE log4j:configuration SYSTEM \"http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/xml/doc-files/log4j.dtd\">");
+               updateConfigurationPropertiesForCluster(cluster, "logsearch-log4j", Collections.singletonMap("content", content), true, true);
+             }
+           }
+ 
+           Config logSearchServiceLogsConfig = cluster.getDesiredConfigByType("logsearch-service_logs-solrconfig");
+           if (logSearchServiceLogsConfig != null) {
+             String content = logSearchServiceLogsConfig.getProperties().get("content");
+             if (content.contains("class=\"solr.admin.AdminHandlers\"")) {
+               content = content.replaceAll("(?s)<requestHandler name=\"/admin/\".*?class=\"solr.admin.AdminHandlers\" />", "");
+               updateConfigurationPropertiesForCluster(cluster, "logsearch-service_logs-solrconfig", Collections.singletonMap("content", content), true, true);
+             }
+           }
+ 
+           Config logSearchAuditLogsConfig = cluster.getDesiredConfigByType("logsearch-audit_logs-solrconfig");
+           if (logSearchAuditLogsConfig != null) {
+             String content = logSearchAuditLogsConfig.getProperties().get("content");
+             if (content.contains("class=\"solr.admin.AdminHandlers\"")) {
+               content = content.replaceAll("(?s)<requestHandler name=\"/admin/\".*?class=\"solr.admin.AdminHandlers\" />", "");
+               updateConfigurationPropertiesForCluster(cluster, "logsearch-audit_logs-solrconfig", Collections.singletonMap("content", content), true, true);
              }
-             
-             Set<String> removeProperties = new HashSet<>();
-             removeProperties.add("service_name");
-             removeProperties.add("component_mappings");
-             removeProperties.add("content");
-             
-             removeConfigurationPropertiesFromCluster(cluster, configType, removeProperties);
+           }
 -          
++
+           Config logFeederOutputConfig = cluster.getDesiredConfigByType("logfeeder-output-config");
+           if (logFeederOutputConfig != null) {
+             String content = logFeederOutputConfig.getProperties().get("content");
+             content = content.replace(
+                 "      \"collection\":\"{{logsearch_solr_collection_service_logs}}\",\n" +
+                 "      \"number_of_shards\": \"{{logsearch_collection_service_logs_numshards}}\",\n" +
+                 "      \"splits_interval_mins\": \"{{logsearch_service_logs_split_interval_mins}}\",\n",
+                 "      \"type\": \"service\",\n");
+ 
+             content = content.replace(
+                 "      \"collection\":\"{{logsearch_solr_collection_audit_logs}}\",\n" +
+                 "      \"number_of_shards\": \"{{logsearch_collection_audit_logs_numshards}}\",\n" +
+                 "      \"splits_interval_mins\": \"{{logsearch_audit_logs_split_interval_mins}}\",\n",
+                 "      \"type\": \"audit\",\n");
+ 
+             updateConfigurationPropertiesForCluster(cluster, "logfeeder-output-config", Collections.singletonMap("content", content), true, true);
            }
          }
        }

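The solrconfig cleanup above relies on a single regular expression: the (?s) flag makes
"." match newlines, so the reluctant .*? can span the multi-line <requestHandler> element
while still stopping at the first closing tag. A self-contained illustration of the same
replaceAll call:

// Demonstrates the (?s) DOTALL flag plus reluctant quantifier used above.
final class RegexSketch {
  public static void main(String[] args) {
    String content = "<requestHandler name=\"/admin/\"\n"
        + "    class=\"solr.admin.AdminHandlers\" />\n<other/>";
    String cleaned = content.replaceAll(
        "(?s)<requestHandler name=\"/admin/\".*?class=\"solr.admin.AdminHandlers\" />", "");
    System.out.println(cleaned.trim()); // prints: <other/>
  }
}
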
http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java
index c7b2f79,c50276e..aa2e7ab
--- a/ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java
@@@ -975,10 -964,15 +964,15 @@@ public class ViewRegistry 
      try {
        org.apache.ambari.server.state.Cluster cluster = clusters.getClusterById(clusterId);
        String clusterName = cluster.getClusterName();
 -      
 +
-       StackId stackId = cluster.getCurrentStackVersion();
+       Set<StackId> stackIds = new HashSet<>();
        Set<String> serviceNames = cluster.getServices().keySet();
 -      
 +
+       for (String serviceName : serviceNames) {
+         Service service = cluster.getService(serviceName);
+         stackIds.add(service.getDesiredStackId());
+       }
+ 
        for (ViewEntity viewEntity : getDefinitions()) {
  
          String viewName = viewEntity.getName();