Posted to commits@ambari.apache.org by nc...@apache.org on 2017/07/13 19:14:34 UTC
[16/37] ambari git commit: AMBARI-21450. Initial cherry-picking for
feature branch (ncole)
http://git-wip-us.apache.org/repos/asf/ambari/blob/48f7fb22/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
deleted file mode 100644
index cd7ae45..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
+++ /dev/null
@@ -1,1344 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.upgrade;
-
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParser;
-import com.google.gson.JsonPrimitive;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.actionmanager.CommandExecutionType;
-import org.apache.ambari.server.collections.Predicate;
-import org.apache.ambari.server.collections.functors.ContainsPredicate;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
-import org.apache.ambari.server.orm.dao.AlertsDAO;
-import org.apache.ambari.server.orm.dao.ArtifactDAO;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
-import org.apache.ambari.server.orm.entities.AlertCurrentEntity;
-import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.orm.entities.AlertHistoryEntity;
-import org.apache.ambari.server.orm.entities.ArtifactEntity;
-import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosConfigurationDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
-import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosKeytabDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosPrincipalDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
-import org.apache.ambari.server.view.ViewArchiveUtility;
-import org.apache.ambari.server.view.ViewInstanceOperationHandler;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.jdbc.support.JdbcUtils;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Upgrade catalog for version 2.5.0.
- */
-public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
-
- protected static final String HOST_VERSION_TABLE = "host_version";
- protected static final String GROUPS_TABLE = "groups";
- protected static final String GROUP_TYPE_COL = "group_type";
- private static final String AMS_ENV = "ams-env";
- private static final String AMS_GRAFANA_INI = "ams-grafana-ini";
- private static final String AMS_SITE = "ams-site";
- private static final String AMS_LOG4J = "ams-log4j";
- private static final String AMS_HBASE_LOG4J = "ams-hbase-log4j";
- private static final String AMS_MODE = "timeline.metrics.service.operation.mode";
- private static final String AMS_HBASE_SITE = "ams-hbase-site";
- private static final String HBASE_ROOTDIR = "hbase.rootdir";
- private static final String HADOOP_ENV = "hadoop-env";
- private static final String KAFKA_BROKER = "kafka-broker";
- private static final String YARN_SITE_CONFIG = "yarn-site";
- private static final String YARN_ENV_CONFIG = "yarn-env";
- private static final String YARN_LCE_CGROUPS_MOUNT_PATH = "yarn.nodemanager.linux-container-executor.cgroups.mount-path";
- private static final String YARN_CGROUPS_ENABLED = "yarn_cgroups_enabled";
- private static final String KAFKA_TIMELINE_METRICS_HOST = "kafka.timeline.metrics.host";
-
- public static final String COMPONENT_TABLE = "servicecomponentdesiredstate";
- public static final String COMPONENT_VERSION_TABLE = "servicecomponent_version";
- public static final String COMPONENT_VERSION_PK = "PK_sc_version";
- public static final String COMPONENT_VERSION_FK_COMPONENT = "FK_scv_component_id";
- public static final String COMPONENT_VERSION_FK_REPO_VERSION = "FK_scv_repo_version_id";
-
- protected static final String SERVICE_DESIRED_STATE_TABLE = "servicedesiredstate";
- protected static final String CREDENTIAL_STORE_ENABLED_COL = "credential_store_enabled";
-
- protected static final String HOST_COMPONENT_DESIREDSTATE_TABLE = "hostcomponentdesiredstate";
- protected static final String HOST_COMPONENT_DESIREDSTATE_ID_COL = "id";
- protected static final String HOST_COMPONENT_DESIREDSTATE_INDEX = "UQ_hcdesiredstate_name";
-
- @Inject
- ViewInstanceDAO viewInstanceDAO;
-
- @Inject
- ViewInstanceOperationHandler viewInstanceOperationHandler;
-
- @Inject
- protected ViewArchiveUtility archiveUtility;
-
- /**
- * Logger.
- */
- private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog250.class);
-
- @Inject
- DaoUtils daoUtils;
-
- // ----- Constructors ------------------------------------------------------
-
- /**
- * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
- *
- * @param injector Guice injector that tracks dependencies and uses bindings to inject them.
- */
- @Inject
- public UpgradeCatalog250(Injector injector) {
- super(injector);
-
- daoUtils = injector.getInstance(DaoUtils.class);
- }
-
- // ----- UpgradeCatalog ----------------------------------------------------
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String getTargetVersion() {
- return "2.5.0";
- }
-
- // ----- AbstractUpgradeCatalog --------------------------------------------
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String getSourceVersion() {
- return "2.4.2";
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- protected void executeDDLUpdates() throws AmbariException, SQLException {
- updateHostVersionTable();
- createComponentVersionTable();
- updateGroupsTable();
- dbAccessor.addColumn("stage",
- new DBAccessor.DBColumnInfo("command_execution_type", String.class, 32, CommandExecutionType.STAGE.toString(),
- false));
- updateServiceDesiredStateTable();
- updateHostComponentDesiredStateTable();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- protected void executePreDMLUpdates() throws AmbariException, SQLException {
-
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- protected void executeDMLUpdates() throws AmbariException, SQLException {
- addNewConfigurationsFromXml();
- updateAMSConfigs();
- updateStormAlerts();
- updateLogSearchAlert();
- removeAlertDuplicates();
- updateHadoopEnvConfigs();
- updateKafkaConfigs();
- updateHIVEInteractiveConfigs();
- unInstallAllZeppelinViews();
- updateZeppelinConfigs();
- updateAtlasConfigs();
- updateLogSearchConfigs();
- updateAmbariInfraConfigs();
- updateYarnSite();
- updateRangerUrlConfigs();
- addManageServiceAutoStartPermissions();
- addManageAlertNotificationsPermissions();
- updateKerberosDescriptorArtifacts();
- fixHBaseMasterCPUUtilizationAlertDefinition();
- updateTezHistoryUrlBase();
- }
-
- /**
- * Fix the HBase Master CPU Utilization alert definition by swapping the values for <code>kerberos_keytab</code>
- * and <code>kerberos_principal</code>.
- */
- protected void fixHBaseMasterCPUUtilizationAlertDefinition() {
- AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- Clusters clusters = ambariManagementController.getClusters();
-
- Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
- for (final Cluster cluster : clusterMap.values()) {
- long clusterID = cluster.getClusterId();
- AlertDefinitionEntity alertDefinition = alertDefinitionDAO.findByName(clusterID, "hbase_master_cpu");
- if (alertDefinition != null) {
- LOG.info("Updating alert definition {} in cluster {}", alertDefinition.getDefinitionName(), clusterID);
- String source = alertDefinition.getSource();
-
- if (source != null) {
- JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-
- if (sourceJson != null) {
- boolean changesExist = false;
- LOG.debug("Source before update : {}", sourceJson);
-
- JsonObject uriJson = sourceJson.get("uri").getAsJsonObject();
- JsonPrimitive primitive;
-
- if (uriJson != null) {
- // Replace
- // "kerberos_keytab": "{{hbase-site/hbase.security.authentication.spnego.kerberos.principal}}"
- // With
- // "kerberos_keytab": "{{hbase-site/hbase.security.authentication.spnego.kerberos.keytab}}"
- primitive = uriJson.getAsJsonPrimitive("kerberos_keytab");
- if ((primitive != null) && primitive.isString() && "{{hbase-site/hbase.security.authentication.spnego.kerberos.principal}}".equals(primitive.getAsString())) {
- uriJson.remove("kerberos_keytab");
- uriJson.addProperty("kerberos_keytab", "{{hbase-site/hbase.security.authentication.spnego.kerberos.keytab}}");
- changesExist = true;
- }
-
- // Replace
- // "kerberos_principal": "{{hbase-site/hbase.security.authentication.spnego.kerberos.keytab}}"
- // With
- // "kerberos_principal": "{{hbase-site/hbase.security.authentication.spnego.kerberos.principal}}"
- primitive = uriJson.getAsJsonPrimitive("kerberos_principal");
- if ((primitive != null) && primitive.isString() && "{{hbase-site/hbase.security.authentication.spnego.kerberos.keytab}}".equals(primitive.getAsString())) {
- uriJson.remove("kerberos_principal");
- uriJson.addProperty("kerberos_principal", "{{hbase-site/hbase.security.authentication.spnego.kerberos.principal}}");
- changesExist = true;
- }
- }
-
- LOG.debug("Source after update : {}", sourceJson);
- if (changesExist) {
- alertDefinition.setSource(sourceJson.toString());
- alertDefinition.setHash(UUID.randomUUID().toString());
-
- alertDefinitionDAO.merge(alertDefinition);
- }
- }
- }
- }
- }
- }
-
- /**
- * Removes all {@link AlertCurrentEntity} duplicates from database.
- * Alerts are considered duplicates if their definition, host, and alert instance are the same.
- * Duplicates could be created by earlier versions of Ambari, up to 2.4.1.
- */
- protected void removeAlertDuplicates() {
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- AlertsDAO alertsDao = injector.getInstance(AlertsDAO.class);
- Clusters clusters = ambariManagementController.getClusters();
-
- Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
- for (final Cluster cluster : clusterMap.values()) {
- long clusterID = cluster.getClusterId();
- LOG.info("Removing alert duplicates on cluster {}", cluster.getClusterName());
- List<AlertCurrentEntity> alertCurrentEntities = alertsDao.findCurrentByCluster(clusterID);
- Set<AlertHistoryEntity> uniqueAlerts = new HashSet<>();
- for (AlertCurrentEntity alertCurrentEntity : alertCurrentEntities) {
-
- AlertHistoryEntity currentAlert = new AlertHistoryEntity();
- currentAlert.setAlertDefinition(alertCurrentEntity.getAlertHistory().getAlertDefinition());
- currentAlert.setHostName(alertCurrentEntity.getAlertHistory().getHostName());
- currentAlert.setAlertInstance(alertCurrentEntity.getAlertHistory().getAlertInstance());
-
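- // the HashSet de-duplication below relies on AlertHistoryEntity implementing
- // equals()/hashCode() over the fields set above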
- if (uniqueAlerts.contains(currentAlert)) {
- LOG.info("Alert entity duplicate {} will be removed",alertCurrentEntity.getAlertHistory());
- alertsDao.remove(alertCurrentEntity);
- } else {
- uniqueAlerts.add(currentAlert);
- }
- }
- }
- }
-
- protected void updateStormAlerts() {
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
- Clusters clusters = ambariManagementController.getClusters();
-
- Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
- for (final Cluster cluster : clusterMap.values()) {
- long clusterID = cluster.getClusterId();
- LOG.info("Updating storm alert definitions on cluster : " + cluster.getClusterName());
-
- final AlertDefinitionEntity stormServerProcessDefinitionEntity = alertDefinitionDAO.findByName(
- clusterID, "storm_server_process");
-
- final AlertDefinitionEntity stormWebAlert = alertDefinitionDAO.findByName(
- clusterID, "storm_webui");
-
- if (stormServerProcessDefinitionEntity != null) {
- LOG.info("Removing alert definition : " + stormServerProcessDefinitionEntity.toString());
- alertDefinitionDAO.remove(stormServerProcessDefinitionEntity);
- }
-
- if (stormWebAlert != null) {
- LOG.info("Updating alert definition : " + stormWebAlert.getDefinitionName());
- String source = stormWebAlert.getSource();
- JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
- LOG.debug("Source before update : " + sourceJson);
-
- JsonObject uriJson = sourceJson.get("uri").getAsJsonObject();
- uriJson.remove("https");
- uriJson.remove("https_property");
- uriJson.remove("https_property_value");
- uriJson.addProperty("https", "{{storm-site/ui.https.port}}");
- uriJson.addProperty("https_property", "{{storm-site/ui.https.keystore.type}}");
- uriJson.addProperty("https_property_value", "jks");
-
- LOG.debug("Source after update : " + sourceJson);
- stormWebAlert.setSource(sourceJson.toString());
- alertDefinitionDAO.merge(stormWebAlert);
- }
- }
- }
-
- protected void updateLogSearchAlert() {
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
- Clusters clusters = ambariManagementController.getClusters();
-
- Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
- for (final Cluster cluster : clusterMap.values()) {
- long clusterID = cluster.getClusterId();
- LOG.info("Updating Log Search web ui alert definitions on cluster : " + cluster.getClusterName());
-
- final AlertDefinitionEntity logSearchWebAlert = alertDefinitionDAO.findByName(
- clusterID, "logsearch_ui");
-
- if (logSearchWebAlert != null) {
- LOG.info("Updating alert definition : " + logSearchWebAlert.getDefinitionName());
- String source = logSearchWebAlert.getSource();
- JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
- LOG.debug("Source before update : " + sourceJson);
-
- JsonObject uriJson = sourceJson.get("uri").getAsJsonObject();
- uriJson.remove("https_property");
- uriJson.remove("https_property_value");
- uriJson.addProperty("https_property", "{{logsearch-env/logsearch_ui_protocol}}");
- uriJson.addProperty("https_property_value", "https");
-
- LOG.debug("Source after update : " + sourceJson);
- logSearchWebAlert.setSource(sourceJson.toString());
- alertDefinitionDAO.merge(logSearchWebAlert);
- }
- }
- }
-
- /**
- * Checks whether the previous value of 'tez.tez-ui.history-url.base' contains the Tez view's URL.
- * If it does, points it to the fixed URL of the Tez auto view introduced in Ambari 2.5.0.0;
- * otherwise, logs an error and moves on.
- */
- protected void updateTezHistoryUrlBase() throws AmbariException {
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- Clusters clusters = ambariManagementController.getClusters();
-
- if (clusters != null) {
- Map<String, Cluster> clusterMap = clusters.getClusters();
- if (clusterMap != null && !clusterMap.isEmpty()) {
- for (final Cluster cluster : clusterMap.values()) {
- Set<String> installedServices = cluster.getServices().keySet();
- if (installedServices.contains("TEZ")) {
- Config tezSite = cluster.getDesiredConfigByType("tez-site");
- if (tezSite != null) {
- String currentTezHistoryUrlBase = tezSite.getProperties().get("tez.tez-ui.history-url.base");
- if (!StringUtils.isEmpty(currentTezHistoryUrlBase)) {
- LOG.info("Current Tez History URL base: {} ", currentTezHistoryUrlBase);
- String newTezHistoryUrlBase = null;
- try {
- newTezHistoryUrlBase = getUpdatedTezHistoryUrlBase(currentTezHistoryUrlBase);
- } catch (AmbariException e) {
- LOG.error("Error occurred while creating updated URL of tez view using value in property tez.tez-ui.history-url.base." +
- "The current value {} is not of standard format expected by Ambari. Skipping the updation of tez.tez-ui.history-url.base." +
- "Please check validity of this property manually in tez site after upgrade.", currentTezHistoryUrlBase, e);
- return;
- }
- LOG.info("New Tez History URL base: {} ", newTezHistoryUrlBase);
- updateConfigurationProperties("tez-site", Collections.singletonMap("tez.tez-ui.history-url.base", newTezHistoryUrlBase), true, false);
- }
- }
- }
- }
- }
- }
- }
-
- /**
- * Transforms the existing Tez history URL base into the fixed short URL for the Tez auto instance.
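- * For example (host and version here are illustrative, not from this patch):
- * "http://ambari.example.com:8080/#/main/views/TEZ/0.7.0.2.5.0.0-1/TEZ_CLUSTER_INSTANCE" becomes
- * "http://ambari.example.com:8080/#/main/view/TEZ/tez_cluster_instance".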
- * @param currentTezHistoryUrlBase existing value of the Tez history URL base
- * @return the updated Tez history URL base
- * @throws AmbariException if currentTezHistoryUrlBase is malformed or does not match the Tez view URL regex
- */
- protected String getUpdatedTezHistoryUrlBase(String currentTezHistoryUrlBase) throws AmbariException {
- String pattern = "(.*)(\\/views\\/TEZ\\/)(.*)";
- Pattern regex = Pattern.compile(pattern);
- Matcher matcher = regex.matcher(currentTezHistoryUrlBase);
- String prefix;
- if (matcher.find()) {
- prefix = matcher.group(1);
- } else {
- throw new AmbariException("Cannot prepare the new value for property: 'tez.tez-ui.history-url.base' using the old value: '" + currentTezHistoryUrlBase + "'");
- }
-
- // use the Tez auto-instance short URL name instead of the Tez version and Tez view instance name
- return prefix + "/view/TEZ/tez_cluster_instance";
- }
-
- protected void updateHostVersionTable() throws SQLException {
- LOG.info("Updating the {} table", HOST_VERSION_TABLE);
-
- // Add the unique constraint to the host_version table
- dbAccessor.addUniqueConstraint(HOST_VERSION_TABLE, "UQ_host_repo", "repo_version_id", "host_id");
- }
-
- protected void updateGroupsTable() throws SQLException {
- LOG.info("Updating the {} table", GROUPS_TABLE);
-
- dbAccessor.addColumn(GROUPS_TABLE, new DBColumnInfo(GROUP_TYPE_COL, String.class, null, "LOCAL", false));
- dbAccessor.executeQuery("UPDATE groups SET group_type='LDAP' WHERE ldap_group=1");
- dbAccessor.addUniqueConstraint(GROUPS_TABLE, "UNQ_groups_0", "group_name", "group_type");
- }
-
- /**
- * Updates {@code yarn-site} in the following ways:
- *
- * Removes {@code YARN_LCE_CGROUPS_MOUNT_PATH} if {@code YARN_CGROUPS_ENABLED} is {@code false} and
- * {@code YARN_LCE_CGROUPS_MOUNT_PATH} is an empty string.
- *
- * @throws AmbariException
- */
- protected void updateYarnSite() throws AmbariException {
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- Clusters clusters = ambariManagementController.getClusters();
- Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
- for (final Cluster cluster : clusterMap.values()) {
- Config yarnEnvConfig = cluster.getDesiredConfigByType(YARN_ENV_CONFIG);
- Config yarnSiteConfig = cluster.getDesiredConfigByType(YARN_SITE_CONFIG);
- if (yarnEnvConfig != null && yarnSiteConfig != null) {
- String cgroupEnabled = yarnEnvConfig.getProperties().get(YARN_CGROUPS_ENABLED);
- String mountPath = yarnSiteConfig.getProperties().get(YARN_LCE_CGROUPS_MOUNT_PATH);
- if (StringUtils.isEmpty(mountPath) && cgroupEnabled != null
- && cgroupEnabled.trim().equalsIgnoreCase("false")){
- removeConfigurationPropertiesFromCluster(cluster, YARN_SITE_CONFIG, Collections.singleton(YARN_LCE_CGROUPS_MOUNT_PATH));
- }
- }
- }
- }
-
- protected void updateAMSConfigs() throws AmbariException {
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- Clusters clusters = ambariManagementController.getClusters();
-
- if (clusters != null) {
- Map<String, Cluster> clusterMap = clusters.getClusters();
-
- if (clusterMap != null && !clusterMap.isEmpty()) {
- for (final Cluster cluster : clusterMap.values()) {
-
- Config amsEnv = cluster.getDesiredConfigByType(AMS_ENV);
- if (amsEnv != null) {
- Map<String, String> amsEnvProperties = amsEnv.getProperties();
- String content = amsEnvProperties.get("content");
- Map<String, String> newProperties = new HashMap<>();
- newProperties.put("content", updateAmsEnvContent(content));
- updateConfigurationPropertiesForCluster(cluster, AMS_ENV, newProperties, true, true);
- }
-
-
- boolean isDistributed = false;
- Config amsSite = cluster.getDesiredConfigByType(AMS_SITE);
- if (amsSite != null) {
- if ("distributed".equals(amsSite.getProperties().get(AMS_MODE))) {
- isDistributed = true;
- }
-
- Map<String, String> amsSiteProperties = amsSite.getProperties();
-
- if (amsSiteProperties != null && amsSiteProperties.containsKey("timeline.metrics.hbase.fifo.compaction.enabled")) {
- LOG.info("Removing timeline.metrics.hbase.fifo.compaction.enabled from ams-site");
- removeConfigurationPropertiesFromCluster(cluster, AMS_SITE, Collections.singleton("timeline.metrics.hbase.fifo.compaction.enabled"));
- }
- }
-
- if (isDistributed) {
- Config amsHbaseSite = cluster.getDesiredConfigByType(AMS_HBASE_SITE);
- if (amsHbaseSite != null) {
- Map<String, String> amsHbaseSiteProperties = amsHbaseSite.getProperties();
- String rootDir = amsHbaseSiteProperties.get(HBASE_ROOTDIR);
- if (StringUtils.isNotEmpty(rootDir) && rootDir.startsWith("hdfs://")) {
- int indexOfSlash = rootDir.indexOf("/", 7);
- Map<String, String> newProperties = new HashMap<>();
- String newRootdir = rootDir.substring(indexOfSlash);
- newProperties.put(HBASE_ROOTDIR, newRootdir);
- LOG.info("Changing ams-hbase-site rootdir to " + newRootdir);
- updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_SITE, newProperties, true, true);
- }
- }
- }
-
- // Update AMS log4j to make rolling properties configurable as separate fields.
- Config amsLog4jProperties = cluster.getDesiredConfigByType(AMS_LOG4J);
- if (amsLog4jProperties != null) {
- Map<String, String> newProperties = new HashMap<>();
-
- String content = amsLog4jProperties.getProperties().get("content");
- content = SchemaUpgradeUtil.extractProperty(content, "ams_log_max_backup_size", "ams_log_max_backup_size", "log4j.appender.file.MaxFileSize=(\\w+)MB", "80", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "ams_log_number_of_backup_files", "ams_log_number_of_backup_files", "log4j.appender.file.MaxBackupIndex=(\\w+)", "60", newProperties);
- newProperties.put("content", content);
- updateConfigurationPropertiesForCluster(cluster, AMS_LOG4J, newProperties, true, true);
- }
-
- Config amsHbaseLog4jProperties = cluster.getDesiredConfigByType(AMS_HBASE_LOG4J);
- if (amsHbaseLog4jProperties != null) {
- Map<String, String> newProperties = new HashMap<>();
-
- String content = amsHbaseLog4jProperties.getProperties().get("content");
- content = SchemaUpgradeUtil.extractProperty(content, "ams_hbase_log_maxfilesize", "ams_hbase_log_maxfilesize", "hbase.log.maxfilesize=(\\w+)MB", "256", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "ams_hbase_log_maxbackupindex", "ams_hbase_log_maxbackupindex", "hbase.log.maxbackupindex=(\\w+)", "20", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "ams_hbase_security_log_maxfilesize", "ams_hbase_security_log_maxfilesize", "hbase.security.log.maxfilesize=(\\w+)MB", "256", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "ams_hbase_security_log_maxbackupindex", "ams_hbase_security_log_maxbackupindex", "hbase.security.log.maxbackupindex=(\\w+)", "20", newProperties);
- newProperties.put("content", content);
- updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_LOG4J, newProperties, true, true);
- }
-
- Config amsGrafanaIni = cluster.getDesiredConfigByType(AMS_GRAFANA_INI);
- if (amsGrafanaIni != null) {
- Map<String, String> amsGrafanaIniProperties = amsGrafanaIni.getProperties();
- String content = amsGrafanaIniProperties.get("content");
- Map<String, String> newProperties = new HashMap<>();
- newProperties.put("content", updateAmsGrafanaIniContent(content));
- updateConfigurationPropertiesForCluster(cluster, AMS_GRAFANA_INI, newProperties, true, true);
- }
-
- }
- }
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- protected void updateKerberosDescriptorArtifact(ArtifactDAO artifactDAO, ArtifactEntity artifactEntity) throws AmbariException {
- if (artifactEntity != null) {
- Map<String, Object> data = artifactEntity.getArtifactData();
-
- if (data != null) {
- final KerberosDescriptor kerberosDescriptor = new KerberosDescriptorFactory().createInstance(data);
-
- if (kerberosDescriptor != null) {
- KerberosServiceDescriptor logSearchKerberosDescriptor = kerberosDescriptor.getService("LOGSEARCH");
- KerberosServiceDescriptor atlasKerberosDescriptor = kerberosDescriptor.getService("ATLAS");
- KerberosServiceDescriptor rangerKerberosDescriptor = kerberosDescriptor.getService("RANGER");
- addInfrSolrDescriptor(artifactDAO, artifactEntity, kerberosDescriptor, atlasKerberosDescriptor, "ATLAS_SERVER");
- addInfrSolrDescriptor(artifactDAO, artifactEntity, kerberosDescriptor, logSearchKerberosDescriptor, "LOGSEARCH_SERVER");
- addInfrSolrDescriptor(artifactDAO, artifactEntity, kerberosDescriptor, rangerKerberosDescriptor, "RANGER_ADMIN");
- KerberosServiceDescriptor stormKerberosDescriptor = kerberosDescriptor.getService("STORM");
-
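- // Replace the NIMBUS-specific /STORM/NIMBUS/nimbus_server identity with a reference to the shared
- // /STORM/storm_components identity, keeping only the Ranger audit configuration mappings; the
- // principal and keytab values themselves come from the referenced identity.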
- if (stormKerberosDescriptor != null) {
- KerberosComponentDescriptor componentDescriptor = stormKerberosDescriptor.getComponent("NIMBUS");
- if (componentDescriptor != null) {
- KerberosIdentityDescriptor origIdentityDescriptor = componentDescriptor.getIdentity("/STORM/NIMBUS/nimbus_server");
- if (origIdentityDescriptor != null) {
- KerberosPrincipalDescriptor origPrincipalDescriptor = origIdentityDescriptor.getPrincipalDescriptor();
- KerberosPrincipalDescriptor newPrincipalDescriptor = new KerberosPrincipalDescriptor(
- null,
- null,
- (origPrincipalDescriptor == null) ?
- "ranger-storm-audit/xasecure.audit.jaas.Client.option.principal" : origPrincipalDescriptor.getConfiguration(),
- null
- );
- KerberosKeytabDescriptor origKeytabDescriptor = origIdentityDescriptor.getKeytabDescriptor();
- KerberosKeytabDescriptor newKeytabDescriptor = new KerberosKeytabDescriptor(
- null,
- null,
- null,
- null,
- null,
- (origKeytabDescriptor == null) ?
- "ranger-storm-audit/xasecure.audit.jaas.Client.option.keyTab" : origKeytabDescriptor.getConfiguration(),
- false);
- componentDescriptor.removeIdentity("/STORM/NIMBUS/nimbus_server");
- componentDescriptor.putIdentity(new KerberosIdentityDescriptor("/STORM/storm_components", null, newPrincipalDescriptor, newKeytabDescriptor, null));
- }
- }
- }
- KerberosServiceDescriptor yarnKerberosDescriptor = kerberosDescriptor.getService("YARN");
- if (yarnKerberosDescriptor != null) {
- Map<String, KerberosConfigurationDescriptor> configs = yarnKerberosDescriptor.getConfigurations();
- KerberosConfigurationDescriptor yarnSiteConfigDescriptor = configs.get("yarn-site");
- if (yarnSiteConfigDescriptor != null) {
- Map<String, String> properties = yarnSiteConfigDescriptor.getProperties();
- if (properties != null && properties.containsKey(YARN_LCE_CGROUPS_MOUNT_PATH)) {
- properties.remove(YARN_LCE_CGROUPS_MOUNT_PATH);
- }
- }
- }
-
- // Fix HBASE_MASTER Kerberos identity for Ranger audit by clearing out any keytab file or principal name values.
- KerberosServiceDescriptor hbaseKerberosDescriptor = kerberosDescriptor.getService("HBASE");
- if (hbaseKerberosDescriptor != null) {
- KerberosComponentDescriptor hbaseMasterKerberosDescriptor = hbaseKerberosDescriptor.getComponent("HBASE_MASTER");
- if (hbaseMasterKerberosDescriptor != null) {
- KerberosIdentityDescriptor identityDescriptor = hbaseMasterKerberosDescriptor.getIdentity("/HBASE/HBASE_MASTER/hbase_master_hbase");
-
- if (identityDescriptor != null) {
- KerberosPrincipalDescriptor principalDescriptor = identityDescriptor.getPrincipalDescriptor();
- KerberosKeytabDescriptor keytabDescriptor = identityDescriptor.getKeytabDescriptor();
-
- identityDescriptor.setReference(identityDescriptor.getName());
- identityDescriptor.setName("ranger_hbase_audit");
-
- principalDescriptor.setValue(null);
- keytabDescriptor.setFile(null);
- }
- }
- }
-
- artifactEntity.setArtifactData(kerberosDescriptor.toMap());
- artifactDAO.merge(artifactEntity);
- }
- }
- }
- }
-
- /**
- * Adds the /AMBARI_INFRA/INFRA_SOLR/infra-solr identity reference to a specific service component.
- */
- private void addInfrSolrDescriptor(ArtifactDAO artifactDAO, ArtifactEntity artifactEntity, KerberosDescriptor kerberosDescriptor,
- KerberosServiceDescriptor serviceDescriptor, String componentName) {
- if (serviceDescriptor != null) {
- KerberosComponentDescriptor componentDescriptor = serviceDescriptor.getComponent(componentName);
- if (componentDescriptor != null) {
- KerberosIdentityDescriptor origIdentityDescriptor = componentDescriptor.getIdentity("/AMBARI_INFRA/INFRA_SOLR/infra-solr");
- if (origIdentityDescriptor != null) {
- LOG.info("/AMBARI_INFRA/INFRA_SOLR/infra-solr identity already exists in {} component", componentName);
- } else {
- Predicate predicate = ContainsPredicate.fromMap(Collections.<String, Object>singletonMap(ContainsPredicate.NAME, Arrays.asList("services", "AMBARI_INFRA")));
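- // the predicate restricts this identity to clusters whose installed services include AMBARI_INFRA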
- componentDescriptor.putIdentity(new KerberosIdentityDescriptor("/AMBARI_INFRA/INFRA_SOLR/infra-solr", null, null, null, predicate));
- }
- }
- }
- }
-
- protected void unInstallAllZeppelinViews() {
- LOG.info("Removing all Zeppelin views.");
- List<ViewInstanceEntity> viewInstanceList = viewInstanceDAO.findAll();
- for (ViewInstanceEntity viewInstanceEntity : viewInstanceList) {
- if (viewInstanceEntity.getViewName().equalsIgnoreCase("ZEPPELIN{1.0.0}")) {
- LOG.info("Uninstalling zeppelin view : {}", viewInstanceEntity);
- try {
- viewInstanceOperationHandler.uninstallViewInstance(viewInstanceEntity);
- } catch (Exception e) {
- LOG.error("Exception occurred while uninstalling view {}. Ignored for now.", viewInstanceEntity, e);
- }
- }
- }
- }
-
- /**
- * Updates Zeppelin configs.
- *
- * @throws AmbariException
- */
- protected void updateZeppelinConfigs() throws AmbariException {
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- Clusters clusters = ambariManagementController.getClusters();
- if (clusters != null) {
- Map<String, Cluster> clusterMap = clusters.getClusters();
-
- if (clusterMap != null && !clusterMap.isEmpty()) {
- for (final Cluster cluster : clusterMap.values()) {
- Config zeppelinEnvProperties = cluster.getDesiredConfigByType("zeppelin-env");
- if (zeppelinEnvProperties != null) {
- String log4jPropertiesContent = zeppelinEnvProperties.getProperties().get("log4j_properties_content");
- String shiroIniContent = zeppelinEnvProperties.getProperties().get("shiro_ini_content");
-
- updateConfigurationProperties("zeppelin-log4j-properties", Collections.singletonMap("log4j_properties_content", log4jPropertiesContent), true, true);
- updateConfigurationProperties("zeppelin-shiro-ini", Collections.singletonMap("shiro_ini_content", shiroIniContent), true, true);
- }
- }
- }
- }
- }
-
- protected String updateAmsGrafanaIniContent(String content) {
- if (content == null) {
- return null;
- }
-
- String toReplace = "admin_password = {{ams_grafana_admin_pwd}}";
- String replaceWith = ";admin_password =";
- content = content.replace(toReplace, replaceWith);
- return content;
- }
-
- protected String updateAmsEnvContent(String content) {
- if (content == null) {
- return null;
- }
-
- List<String> toReplaceList = new ArrayList<>();
- toReplaceList.add("\n# HBase normalizer enabled\n");
- toReplaceList.add("\n# HBase compaction policy enabled\n");
- toReplaceList.add("export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}\n");
- toReplaceList.add("export AMS_HBASE_FIFO_COMPACTION_ENABLED={{ams_hbase_fifo_compaction_enabled}}\n");
-
- //Because of AMBARI-15331 : AMS HBase FIFO compaction policy and Normalizer settings are not handled correctly
- toReplaceList.add("export HBASE_NORMALIZATION_ENABLED={{ams_hbase_normalizer_enabled}}\n");
- toReplaceList.add("export HBASE_FIFO_COMPACTION_POLICY_ENABLED={{ams_hbase_fifo_compaction_policy_enabled}}\n");
-
-
- for (String toReplace : toReplaceList) {
- if (content.contains(toReplace)) {
- content = content.replace(toReplace, StringUtils.EMPTY);
- }
- }
-
- return content;
- }
-
- protected void updateHadoopEnvConfigs() throws AmbariException {
- AmbariManagementController ambariManagementController = injector.getInstance(
- AmbariManagementController.class);
- Clusters clusters = ambariManagementController.getClusters();
-
- if (clusters != null) {
- Map<String, Cluster> clusterMap = clusters.getClusters();
- Map<String, String> prop = new HashMap<String, String>();
-
- if (clusterMap != null && !clusterMap.isEmpty()) {
- for (final Cluster cluster : clusterMap.values()) {
- String content = null;
- boolean contentUpdated = false;
-
- if (cluster.getDesiredConfigByType(HADOOP_ENV) != null) {
- content = cluster.getDesiredConfigByType(HADOOP_ENV).getProperties().get("content");
- }
-
- if (content != null) {
- if (!content.contains("ulimit -l")) { // Append "ulimit -l" to hadoop-env.sh
- content += "\n" +
- "{% if is_datanode_max_locked_memory_set %}\n" +
- "# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n" +
- "# Makes sense to fix only when runing DN as root \n" +
- "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n" +
- " ulimit -l {{datanode_max_locked_memory}}\n" +
- "fi\n" +
- "{% endif %}";
-
- contentUpdated = true;
- }
-
- if (!content.contains("ulimit -n")){ // Append "ulimit -n" to hadoop-env.sh
- content += "\n" +
- "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then \n" +
- " ulimit -n {{hdfs_user_nofile_limit}}\n" +
- "fi";
- contentUpdated = true;
- }
-
- if (contentUpdated){
- prop.put("content", content);
- updateConfigurationPropertiesForCluster(cluster, "hadoop-env",
- prop, true, false);
- }
- }
- }
- }
- }
- }
-
- /**
- * Creates the servicecomponent_version table
- *
- * @throws SQLException
- */
- private void createComponentVersionTable() throws SQLException {
-
- List<DBColumnInfo> columns = new ArrayList<>();
-
- // Define the columns of the servicecomponent_version table
- LOG.info("Creating {} table", COMPONENT_VERSION_TABLE);
-
- columns.add(new DBColumnInfo("id", Long.class, null, null, false));
- columns.add(new DBColumnInfo("component_id", Long.class, null, null, false));
- columns.add(new DBColumnInfo("repo_version_id", Long.class, null, null, false));
- columns.add(new DBColumnInfo("state", String.class, 32, null, false));
- columns.add(new DBColumnInfo("user_name", String.class, 255, null, false));
- dbAccessor.createTable(COMPONENT_VERSION_TABLE, columns, (String[]) null);
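- // roughly equivalent DDL (illustrative; concrete column types depend on the database dialect):
- //   CREATE TABLE servicecomponent_version (id BIGINT NOT NULL, component_id BIGINT NOT NULL,
- //     repo_version_id BIGINT NOT NULL, state VARCHAR(32) NOT NULL, user_name VARCHAR(255) NOT NULL)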
-
- dbAccessor.addPKConstraint(COMPONENT_VERSION_TABLE, COMPONENT_VERSION_PK, "id");
-
- dbAccessor.addFKConstraint(COMPONENT_VERSION_TABLE, COMPONENT_VERSION_FK_COMPONENT, "component_id",
- COMPONENT_TABLE, "id", false);
-
- dbAccessor.addFKConstraint(COMPONENT_VERSION_TABLE, COMPONENT_VERSION_FK_REPO_VERSION, "repo_version_id",
- "repo_version", "repo_version_id", false);
-
- addSequence("servicecomponent_version_id_seq", 0L, false);
- }
-
- protected void updateKafkaConfigs() throws AmbariException {
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- Clusters clusters = ambariManagementController.getClusters();
-
- if (clusters != null) {
- Map<String, Cluster> clusterMap = clusters.getClusters();
-
-
- if (clusterMap != null && !clusterMap.isEmpty()) {
- for (final Cluster cluster : clusterMap.values()) {
-
- Config kafkaBrokerConfig = cluster.getDesiredConfigByType(KAFKA_BROKER);
- if (kafkaBrokerConfig != null) {
- Map<String, String> kafkaBrokerProperties = kafkaBrokerConfig.getProperties();
-
- if (kafkaBrokerProperties != null && kafkaBrokerProperties.containsKey(KAFKA_TIMELINE_METRICS_HOST)) {
- LOG.info("Removing kafka.timeline.metrics.host from kafka-broker");
- removeConfigurationPropertiesFromCluster(cluster, KAFKA_BROKER, Collections.singleton("kafka.timeline.metrics.host"));
- }
- }
- }
- }
- }
- }
-
- /**
- * Alter servicedesiredstate table.
- * @throws SQLException
- */
- private void updateServiceDesiredStateTable() throws SQLException {
- // ALTER TABLE servicedesiredstate ADD COLUMN
- // credential_store_enabled SMALLINT DEFAULT 0 NOT NULL
- dbAccessor.addColumn(SERVICE_DESIRED_STATE_TABLE,
- new DBColumnInfo(CREDENTIAL_STORE_ENABLED_COL, Short.class, null, 0, false));
- }
-
-
- /**
- * Removes the compound PK from the hostcomponentdesiredstate table
- * and replaces it with a surrogate PK, but only if the table doesn't already have its new PK set.
- * Creates an index and a unique constraint on the columns that originally formed the compound PK.
- *
- * @throws SQLException
- */
- private void updateHostComponentDesiredStateTable() throws SQLException {
- if (dbAccessor.tableHasPrimaryKey(HOST_COMPONENT_DESIREDSTATE_TABLE, HOST_COMPONENT_DESIREDSTATE_ID_COL)) {
- LOG.info("Skipping {} table Primary Key modifications since the new {} column already exists",
- HOST_COMPONENT_DESIREDSTATE_TABLE, HOST_COMPONENT_DESIREDSTATE_ID_COL);
-
- return;
- }
- // add the new ID column as nullable until we populate it
- dbAccessor.addColumn(HOST_COMPONENT_DESIREDSTATE_TABLE,
- new DBColumnInfo(HOST_COMPONENT_DESIREDSTATE_ID_COL, Long.class, null, null, true));
-
- // insert sequence values
- AtomicLong id = new AtomicLong(1);
- Statement statement = null;
- ResultSet resultSet = null;
-
- try {
- statement = dbAccessor.getConnection().createStatement();
-
- if (statement != null) {
- // Select records by old PK
- String selectSQL = String.format(
- "SELECT cluster_id, component_name, host_id, service_name FROM %s", HOST_COMPONENT_DESIREDSTATE_TABLE);
-
- resultSet = statement.executeQuery(selectSQL);
-
- while (resultSet.next()) {
- final Long clusterId = resultSet.getLong("cluster_id");
- final String componentName = resultSet.getString("component_name");
- final Long hostId = resultSet.getLong("host_id");
- final String serviceName = resultSet.getString("service_name");
-
- String updateSQL = String.format(
- "UPDATE %s SET %s = %s WHERE cluster_id = %d AND component_name = '%s' AND service_name = '%s' AND host_id = %d",
- HOST_COMPONENT_DESIREDSTATE_TABLE, HOST_COMPONENT_DESIREDSTATE_ID_COL, id.getAndIncrement(),
- clusterId, componentName, serviceName, hostId);
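- // e.g. (values illustrative): UPDATE hostcomponentdesiredstate SET id = 1
- //   WHERE cluster_id = 2 AND component_name = 'NAMENODE' AND service_name = 'HDFS' AND host_id = 3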
-
- dbAccessor.executeQuery(updateSQL);
- }
-
- // Add sequence for hostcomponentdesiredstate table ids
- addSequence("hostcomponentdesiredstate_id_seq", id.get(), false);
- }
-
- }
- finally {
- JdbcUtils.closeResultSet(resultSet);
- JdbcUtils.closeStatement(statement);
- }
-
- // make the ID column NON NULL now
- dbAccessor.alterColumn(HOST_COMPONENT_DESIREDSTATE_TABLE,
- new DBColumnInfo(HOST_COMPONENT_DESIREDSTATE_ID_COL, Long.class, null, null, false));
-
- // drop existing PK and create new one on ID column
- String primaryKeyConstraintName = null;
- Configuration.DatabaseType databaseType = configuration.getDatabaseType();
-
- switch (databaseType) {
- case POSTGRES:
- case MYSQL:
- case ORACLE:
- case SQL_SERVER:
- primaryKeyConstraintName = dbAccessor.getPrimaryKeyConstraintName(HOST_COMPONENT_DESIREDSTATE_TABLE);
- break;
-
- default:
- throw new UnsupportedOperationException(String.format("Invalid database type '%s'", databaseType));
-
- }
-
- // warn if we can't find it
- if (null == primaryKeyConstraintName) {
- LOG.warn("Unable to determine the primary key constraint name for {}", HOST_COMPONENT_DESIREDSTATE_TABLE);
- }
- else {
- dbAccessor.dropPKConstraint(HOST_COMPONENT_DESIREDSTATE_TABLE, primaryKeyConstraintName, true);
- }
-
- // create a new PK, matching the name of the constraint found in the SQL files
- dbAccessor.addPKConstraint(HOST_COMPONENT_DESIREDSTATE_TABLE, "PK_hostcomponentdesiredstate", "id");
-
- // create index, ensuring column order matches that of the SQL files
- dbAccessor.addUniqueConstraint(HOST_COMPONENT_DESIREDSTATE_TABLE, HOST_COMPONENT_DESIREDSTATE_INDEX,
- "component_name", "service_name", "host_id", "cluster_id");
- }
-
- protected void updateAtlasConfigs() throws AmbariException {
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- Clusters clusters = ambariManagementController.getClusters();
- if (clusters != null) {
- Map<String, Cluster> clusterMap = clusters.getClusters();
- if (clusterMap != null && !clusterMap.isEmpty()) {
- for (final Cluster cluster : clusterMap.values()) {
- updateAtlasHookConfig(cluster, "HIVE", "hive-env", "hive.atlas.hook");
- updateAtlasHookConfig(cluster, "STORM", "storm-env", "storm.atlas.hook");
- updateAtlasHookConfig(cluster, "FALCON", "falcon-env", "falcon.atlas.hook");
- updateAtlasHookConfig(cluster, "SQOOP", "sqoop-env", "sqoop.atlas.hook");
- }
- }
- }
- }
-
- protected void updateAtlasHookConfig(Cluster cluster, String serviceName, String configType, String propertyName) throws AmbariException {
- Set<String> installedServices = cluster.getServices().keySet();
- if (installedServices.contains("ATLAS") && installedServices.contains(serviceName)) {
- Config configEnv = cluster.getDesiredConfigByType(configType);
- if (configEnv != null) {
- Map<String, String> newProperties = new HashMap<>();
- newProperties.put(propertyName, "true");
- boolean updateProperty = configEnv.getProperties().containsKey(propertyName);
- updateConfigurationPropertiesForCluster(cluster, configType, newProperties, updateProperty, true);
- }
- }
- }
-
- private static final String HIVE_INTERACTIVE_ENV = "hive-interactive-env";
- private static final String HIVE_ENV = "hive-env";
- /**
- * Updates Hive Interactive's config in hive-interactive-env.
- *
- * @throws AmbariException
- */
- protected void updateHIVEInteractiveConfigs() throws AmbariException {
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- Clusters clusters = ambariManagementController.getClusters();
- if (clusters != null) {
- Map<String, Cluster> clusterMap = clusters.getClusters();
-
- if (clusterMap != null && !clusterMap.isEmpty()) {
- for (final Cluster cluster : clusterMap.values()) {
- Config hiveInteractiveEnv = cluster.getDesiredConfigByType(HIVE_INTERACTIVE_ENV);
- Config hiveEnv = cluster.getDesiredConfigByType(HIVE_ENV);
- if (hiveInteractiveEnv != null) {
- String hsiHeapSize = "512";
- if (hiveEnv != null) {
- if (hiveEnv.getProperties().containsKey("hive.heapsize")) {
- hsiHeapSize = hiveEnv.getProperties().get("hive.heapsize");
- LOG.info("Updating HSI heap size to: " + hsiHeapSize);
- }
- }
- updateConfigurationProperties(HIVE_INTERACTIVE_ENV, Collections.singletonMap("hive_heapsize",
- hsiHeapSize), true, true);
- }
- }
- }
- }
- }
-
- /**
- * Updates Log Search configs.
- *
- * @throws AmbariException
- */
- protected void updateLogSearchConfigs() throws AmbariException {
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- Clusters clusters = ambariManagementController.getClusters();
- if (clusters != null) {
- Map<String, Cluster> clusterMap = clusters.getClusters();
-
- if (clusterMap != null && !clusterMap.isEmpty()) {
- for (final Cluster cluster : clusterMap.values()) {
- Config logSearchProperties = cluster.getDesiredConfigByType("logsearch-properties");
- if (logSearchProperties != null) {
- Map<String, String> newProperties = new HashMap<>();
- if (!logSearchProperties.getProperties().containsKey("logsearch.auth.external_auth.enabled"))
- newProperties.put("logsearch.auth.external_auth.enabled", logSearchProperties.getProperties().get("logsearch.external.auth.enabled"));
- if (!logSearchProperties.getProperties().containsKey("logsearch.auth.external_auth.host_url"))
- newProperties.put("logsearch.auth.external_auth.host_url", logSearchProperties.getProperties().get("logsearch.external.auth.host_url"));
- if (!logSearchProperties.getProperties().containsKey("logsearch.auth.external_auth.login_url"))
- newProperties.put("logsearch.auth.external_auth.login_url", logSearchProperties.getProperties().get("logsearch.external.auth.login_url"));
-
- Set<String> removeProperties = new HashSet<>();
- removeProperties.add("logsearch.external.auth.enabled");
- removeProperties.add("logsearch.external.auth.host_url");
- removeProperties.add("logsearch.external.auth.login_url");
-
- updateConfigurationPropertiesForCluster(cluster, "logsearch-properties", newProperties, removeProperties, true, true);
- }
-
- Config logfeederEnvProperties = cluster.getDesiredConfigByType("logfeeder-env");
- if (logfeederEnvProperties != null) {
- String content = logfeederEnvProperties.getProperties().get("content");
- if (content.contains("infra_solr_ssl_enabled")) {
- content = content.replace("infra_solr_ssl_enabled", "logfeeder_use_ssl");
- updateConfigurationPropertiesForCluster(cluster, "logfeeder-env", Collections.singletonMap("content", content), true, true);
- }
- }
-
- Config logsearchEnvProperties = cluster.getDesiredConfigByType("logsearch-env");
- if (logsearchEnvProperties != null) {
- Map<String, String> newProperties = new HashMap<>();
- String content = logsearchEnvProperties.getProperties().get("content");
- if (content.contains("infra_solr_ssl_enabled or logsearch_ui_protocol == 'https'")) {
- content = content.replace("infra_solr_ssl_enabled or logsearch_ui_protocol == 'https'", "logsearch_use_ssl");
- }
- if (!content.equals(logsearchEnvProperties.getProperties().get("content"))) {
- newProperties.put("content", content);
- }
-
- if ("http".equals(logsearchEnvProperties.getProperties().get("logsearch_ui_protocol")) &&
- "/etc/security/serverKeys/logsearch.trustStore.jks".equals(logsearchEnvProperties.getProperties().get("logsearch_truststore_location")) &&
- "/etc/security/serverKeys/logsearch.keyStore.jks".equals(logsearchEnvProperties.getProperties().get("logsearch_keystore_location"))) {
- newProperties.put("logsearch_truststore_location", "/etc/ambari-logsearch-portal/conf/keys/logsearch.jks");
- newProperties.put("logsearch_keystore_location", "/etc/ambari-logsearch-portal/conf/keys/logsearch.jks");
- }
-
- Set<String> removeProperties = new HashSet<>();
- removeProperties.add("logsearch_solr_audit_logs_use_ranger");
- removeProperties.add("logsearch_solr_audit_logs_zk_node");
- removeProperties.add("logsearch_solr_audit_logs_zk_quorum");
-
- updateConfigurationPropertiesForCluster(cluster, "logsearch-env", newProperties, removeProperties, true, true);
- }
-
- Config logfeederLog4jProperties = cluster.getDesiredConfigByType("logfeeder-log4j");
- if (logfeederLog4jProperties != null) {
- Map<String, String> newProperties = new HashMap<>();
-
- String content = logfeederLog4jProperties.getProperties().get("content");
- content = SchemaUpgradeUtil.extractProperty(content, "logfeeder_log_maxfilesize", "logfeeder_log_maxfilesize",
- " <param name=\"file\" value=\"\\{\\{logfeeder_log_dir}}/logfeeder.log\"/>\n" +
- " <param name=\"append\" value=\"true\"/>\n" +
- " <param name=\"maxFileSize\" value=\"(\\w+)MB\"/>", "10", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "logfeeder_log_maxbackupindex", "logfeeder_log_maxbackupindex",
- " <param name=\"file\" value=\"\\{\\{logfeeder_log_dir}}/logfeeder.log\"/>\n" +
- " <param name=\"append\" value=\"true\"/>\n" +
- " <param name=\"maxFileSize\" value=\"\\{\\{logfeeder_log_maxfilesize}}MB\"/>\n" +
- " <param name=\"maxBackupIndex\" value=\"(\\w+)\"/>", "10", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "logfeeder_json_log_maxfilesize", "logfeeder_json_log_maxfilesize",
- " <param name=\"file\" value=\"\\{\\{logfeeder_log_dir}}/logsearch-logfeeder.json\" />\n" +
- " <param name=\"append\" value=\"true\" />\n" +
- " <param name=\"maxFileSize\" value=\"(\\w+)MB\" />", "10", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "logfeeder_json_log_maxbackupindex", "logfeeder_json_log_maxbackupindex",
- " <param name=\"file\" value=\"\\{\\{logfeeder_log_dir}}/logsearch-logfeeder.json\" />\n" +
- " <param name=\"append\" value=\"true\" />\n" +
- " <param name=\"maxFileSize\" value=\"\\{\\{logfeeder_json_log_maxfilesize}}MB\" />\n" +
- " <param name=\"maxBackupIndex\" value=\"(\\w+)\" />", "10", newProperties);
-
- newProperties.put("content", content);
- updateConfigurationPropertiesForCluster(cluster, "logfeeder-log4j", newProperties, true, true);
- }
-
- Config logsearchLog4jProperties = cluster.getDesiredConfigByType("logsearch-log4j");
- if (logsearchLog4jProperties != null) {
- Map<String, String> newProperties = new HashMap<>();
-
- String content = logsearchLog4jProperties.getProperties().get("content");
- if (content.contains("{{logsearch_log_dir}}/logsearch.err")) {
- content = content.replace("{{logsearch_log_dir}}/logsearch.err", "{{logsearch_log_dir}}/logsearch.log");
- }
- if (content.contains("<priority value=\"warn\"/>")) {
- content = content.replace("<priority value=\"warn\"/>", "<priority value=\"info\"/>");
- }
-
- content = SchemaUpgradeUtil.extractProperty(content, "logsearch_log_maxfilesize", "logsearch_log_maxfilesize",
- " <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch.log\" />\n" +
- " <param name=\"Threshold\" value=\"info\" />\n" +
- " <param name=\"append\" value=\"true\" />\n" +
- " <param name=\"maxFileSize\" value=\"(\\w+)MB\" />\n", "10", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "logsearch_log_maxbackupindex", "logsearch_log_maxbackupindex",
- " <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch.log\" />\n" +
- " <param name=\"Threshold\" value=\"info\" />\n" +
- " <param name=\"append\" value=\"true\" />\n" +
- " <param name=\"maxFileSize\" value=\"\\{\\{logsearch_log_maxfilesize}}MB\" />\n" +
- " <param name=\"maxBackupIndex\" value=\"(\\w+)\" />\n", "10", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "logsearch_json_log_maxfilesize", "logsearch_json_log_maxfilesize",
- " <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch.json\"/>\n" +
- " <param name=\"append\" value=\"true\"/>\n" +
- " <param name=\"maxFileSize\" value=\"(\\w+)MB\"/>\n", "10", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "logsearch_json_log_maxbackupindex", "logsearch_json_log_maxbackupindex",
- " <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch.json\"/>\n" +
- " <param name=\"append\" value=\"true\"/>\n" +
- " <param name=\"maxFileSize\" value=\"\\{\\{logsearch_json_log_maxfilesize}}MB\"/>\n" +
- " <param name=\"maxBackupIndex\" value=\"(\\w+)\"/>\n", "10", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "logsearch_audit_log_maxfilesize", "logsearch_audit_log_maxfilesize",
- " <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch-audit.json\"/>\n" +
- " <param name=\"append\" value=\"true\"/>\n" +
- " <param name=\"maxFileSize\" value=\"(\\w+)MB\"/>\n", "10", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "logsearch_audit_log_maxbackupindex", "logsearch_audit_log_maxbackupindex",
- " <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch-audit.json\"/>\n" +
- " <param name=\"append\" value=\"true\"/>\n" +
- " <param name=\"maxFileSize\" value=\"\\{\\{logsearch_audit_log_maxfilesize}}MB\"/>\n" +
- " <param name=\"maxBackupIndex\" value=\"(\\w+)\"/>\n", "10", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "logsearch_perf_log_maxfilesize", "logsearch_perf_log_maxfilesize",
- " <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch-performance.json\"/>\n" +
- " <param name=\"Threshold\" value=\"info\"/>\n" +
- " <param name=\"append\" value=\"true\"/>\n" +
- " <param name=\"maxFileSize\" value=\"(\\w+)MB\"/>\n", "10", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "logsearch_perf_log_maxbackupindex", "logsearch_perf_log_maxbackupindex",
- " <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch-performance.json\"/>\n" +
- " <param name=\"Threshold\" value=\"info\"/>\n" +
- " <param name=\"append\" value=\"true\"/>\n" +
- " <param name=\"maxFileSize\" value=\"\\{\\{logsearch_perf_log_maxfilesize}}MB\"/>\n" +
- " <param name=\"maxBackupIndex\" value=\"(\\w+)\"/>\n", "10", newProperties);
-
- newProperties.put("content", content);
- if (!content.equals(logsearchLog4jProperties.getProperties().get("content"))) {
- updateConfigurationPropertiesForCluster(cluster, "logsearch-log4j", newProperties, true, true);
- }
- }
- }
- }
- }
- }
-
- /**
- * Updates Ambari Infra configs.
- *
- * @throws AmbariException
- */
- protected void updateAmbariInfraConfigs() throws AmbariException {
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- Clusters clusters = ambariManagementController.getClusters();
- if (clusters != null) {
- Map<String, Cluster> clusterMap = clusters.getClusters();
-
- if (clusterMap != null && !clusterMap.isEmpty()) {
- for (final Cluster cluster : clusterMap.values()) {
- Config infraSolrEnvProperties = cluster.getDesiredConfigByType("infra-solr-env");
- if (infraSolrEnvProperties != null) {
- String content = infraSolrEnvProperties.getProperties().get("content");
- if (content.contains("SOLR_SSL_TRUST_STORE={{infra_solr_keystore_location}}")) {
- content = content.replace("SOLR_SSL_TRUST_STORE={{infra_solr_keystore_location}}", "SOLR_SSL_TRUST_STORE={{infra_solr_truststore_location}}");
- }
- if (content.contains("SOLR_SSL_TRUST_STORE_PASSWORD={{infra_solr_keystore_password}}")) {
- content = content.replace("SOLR_SSL_TRUST_STORE_PASSWORD={{infra_solr_keystore_password}}", "SOLR_SSL_TRUST_STORE_PASSWORD={{infra_solr_truststore_password}}");
- }
- if (content.contains("SOLR_KERB_NAME_RULES={{infra_solr_kerberos_name_rules}}")) {
- content = content.replace("SOLR_KERB_NAME_RULES={{infra_solr_kerberos_name_rules}}", "SOLR_KERB_NAME_RULES=\"{{infra_solr_kerberos_name_rules}}\"");
- }
- if (content.contains(" -Dsolr.kerberos.name.rules=${SOLR_KERB_NAME_RULES}")) {
- content = content.replace(" -Dsolr.kerberos.name.rules=${SOLR_KERB_NAME_RULES}", "");
- }
- if (!content.equals(infraSolrEnvProperties.getProperties().get("content"))) {
- updateConfigurationPropertiesForCluster(cluster, "infra-solr-env", Collections.singletonMap("content", content), true, true);
- }
- }
-
- Config infraSolrLog4jProperties = cluster.getDesiredConfigByType("infra-solr-log4j");
- if (infraSolrLog4jProperties != null) {
- Map<String, String> newProperties = new HashMap<>();
-
- String content = infraSolrLog4jProperties.getProperties().get("content");
- content = SchemaUpgradeUtil.extractProperty(content, "infra_log_maxfilesize", "infra_log_maxfilesize",
- "log4j.appender.file.MaxFileSize=(\\w+)MB", "10", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "infra_log_maxbackupindex", "infra_log_maxbackupindex",
- "log4j.appender.file.MaxBackupIndex=(\\w+)\n", "9", newProperties);
-
- newProperties.put("content", content);
- updateConfigurationPropertiesForCluster(cluster, "infra-solr-log4j", newProperties, true, true);
- }
-
- Config infraSolrClientLog4jProperties = cluster.getDesiredConfigByType("infra-solr-client-log4j");
- if (infraSolrClientLog4jProperties != null) {
- Map<String, String> newProperties = new HashMap<>();
-
- String content = infraSolrClientLog4jProperties.getProperties().get("content");
- if (content.contains("infra_client_log")) {
- content = content.replace("infra_client_log", "solr_client_log");
- }
-
- content = SchemaUpgradeUtil.extractProperty(content, "infra_client_log_maxfilesize", "solr_client_log_maxfilesize",
- "log4j.appender.file.MaxFileSize=(\\w+)MB", "80", newProperties);
- content = SchemaUpgradeUtil.extractProperty(content, "infra_client_log_maxbackupindex", "solr_client_log_maxbackupindex",
- "log4j.appender.file.MaxBackupIndex=(\\w+)\n", "60", newProperties);
-
- newProperties.put("content", content);
- updateConfigurationPropertiesForCluster(cluster, "infra-solr-client-log4j", newProperties, true, true);
- }
- }
- }
- }
- }
-
- /**
- * Add permissions for managing service auto-start.
- * <p>
- * <ul>
- * <li>SERVICE.MANAGE_AUTO_START permissions for SERVICE.ADMINISTRATOR, CLUSTER.OPERATOR, CLUSTER.ADMINISTRATOR, AMBARI.ADMINISTRATOR</li>
- * <li>CLUSTER.MANAGE_AUTO_START permissions for CLUSTER.OPERATOR, CLUSTER.ADMINISTRATOR, AMBARI.ADMINISTRATOR</li>
- * </ul>
- */
- protected void addManageServiceAutoStartPermissions() throws SQLException {
- Collection<String> roles;
-
- // Add service-level auto-start permission
- roles = Arrays.asList(
- "AMBARI.ADMINISTRATOR:AMBARI",
- "CLUSTER.ADMINISTRATOR:CLUSTER",
- "CLUSTER.OPERATOR:CLUSTER",
- "SERVICE.ADMINISTRATOR:CLUSTER");
- addRoleAuthorization("SERVICE.MANAGE_AUTO_START", "Manage service auto-start", roles);
-
- // Add cluster-level auto-start permission
- roles = Arrays.asList(
- "AMBARI.ADMINISTRATOR:AMBARI",
- "CLUSTER.ADMINISTRATOR:CLUSTER",
- "CLUSTER.OPERATOR:CLUSTER");
- addRoleAuthorization("CLUSTER.MANAGE_AUTO_START", "Manage service auto-start configuration", roles);
- }
-
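(The role strings passed to addRoleAuthorization above follow a
"PERMISSION_NAME:RESOURCE_TYPE" convention; "CLUSTER.OPERATOR:CLUSTER", for
instance, grants the new authorization to the Cluster Operator permission on
cluster resources. Since addRoleAuthorization itself, presumably inherited
from the base catalog class, is not shown in this diff, the following is
only a hypothetical sketch of that parsing.)

    // Hypothetical parsing of the "PERMISSION_NAME:RESOURCE_TYPE"
    // role strings used above; names here are illustrative only.
    final class RoleKeySketch {
      static void describe(String role) {
        String[] parts = role.split(":", 2);
        String permissionName = parts[0]; // e.g. CLUSTER.OPERATOR
        String resourceType = parts[1];   // e.g. CLUSTER or AMBARI
        System.out.println(permissionName + " on resource type " + resourceType);
      }

      public static void main(String[] args) {
        describe("AMBARI.ADMINISTRATOR:AMBARI");
        describe("SERVICE.ADMINISTRATOR:CLUSTER");
      }
    }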
- /**
- * Add permissions for managing alert notifications configuration.
- * <p>
- * <ul>
- * <li>CLUSTER.MANAGE_ALERT_NOTIFICATIONS permissions for AMBARI.ADMINISTRATOR, CLUSTER.ADMINISTRATOR</li>
- * </ul>
- */
- protected void addManageAlertNotificationsPermissions() throws SQLException {
- Collection<String> roles;
- roles = Arrays.asList(
- "AMBARI.ADMINISTRATOR:AMBARI",
- "CLUSTER.ADMINISTRATOR:CLUSTER");
- addRoleAuthorization("CLUSTER.MANAGE_ALERT_NOTIFICATIONS", "Manage alert notifications configuration", roles);
- }
-
- /**
- * Updates the Ranger admin URL for Ranger-plugin-supported configs.
- *
- * @throws AmbariException
- */
- protected void updateRangerUrlConfigs() throws AmbariException {
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-
- Config ranger_admin_properties = cluster.getDesiredConfigByType("admin-properties");
- if(null != ranger_admin_properties) {
- String policyUrl = ranger_admin_properties.getProperties().get("policymgr_external_url");
- if (null != policyUrl) {
- updateRangerUrl(cluster, "ranger-hdfs-security", "ranger.plugin.hdfs.policy.rest.url", policyUrl);
- updateRangerUrl(cluster, "ranger-hive-security", "ranger.plugin.hive.policy.rest.url", policyUrl);
- updateRangerUrl(cluster, "ranger-hbase-security", "ranger.plugin.hbase.policy.rest.url", policyUrl);
- updateRangerUrl(cluster, "ranger-knox-security", "ranger.plugin.knox.policy.rest.url", policyUrl);
- updateRangerUrl(cluster, "ranger-storm-security", "ranger.plugin.storm.policy.rest.url", policyUrl);
- updateRangerUrl(cluster, "ranger-yarn-security", "ranger.plugin.yarn.policy.rest.url", policyUrl);
- updateRangerUrl(cluster, "ranger-kafka-security", "ranger.plugin.kafka.policy.rest.url", policyUrl);
- updateRangerUrl(cluster, "ranger-atlas-security", "ranger.plugin.atlas.policy.rest.url", policyUrl);
- updateRangerUrl(cluster, "ranger-kms-security", "ranger.plugin.kms.policy.rest.url", policyUrl);
- }
- }
- }
- }
-
- protected void updateRangerUrl(Cluster cluster, String configType, String configProperty, String policyUrl) throws AmbariException {
- Config componentSecurity = cluster.getDesiredConfigByType(configType);
- if(componentSecurity != null && componentSecurity.getProperties().containsKey(configProperty)) {
- Map<String, String> updateProperty = new HashMap<>();
- updateProperty.put(configProperty, policyUrl);
- updateConfigurationPropertiesForCluster(cluster, configType, updateProperty, true, false);
- }
- }
-
-
-
-}
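(For orientation while reading the deletions above: config and permission
methods like these are conventionally chained from a catalog's DML-update
phase. The sketch below is illustrative only; the class shape and method
names are assumptions, not the removed file's actual body.)

    // Illustrative wiring sketch: an upgrade catalog typically runs its
    // config and permission updates in one DML phase. All names assumed.
    class CatalogWiringSketch {
      void updateAmbariInfraConfigs() { System.out.println("infra configs"); }
      void updateRangerUrlConfigs() { System.out.println("ranger urls"); }
      void addManageServiceAutoStartPermissions() { System.out.println("auto-start perms"); }
      void addManageAlertNotificationsPermissions() { System.out.println("alert perms"); }

      void executeDMLUpdates() {
        updateAmbariInfraConfigs();
        updateRangerUrlConfigs();
        addManageServiceAutoStartPermissions();
        addManageAlertNotificationsPermissions();
      }

      public static void main(String[] args) {
        new CatalogWiringSketch().executeDMLUpdates();
      }
    }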
http://git-wip-us.apache.org/repos/asf/ambari/blob/48f7fb22/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
index 8ab5033..22b975b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
@@ -83,9 +83,9 @@ public class StageUtils {
protected static final String RACKS = "all_racks";
protected static final String IPV4_ADDRESSES = "all_ipv4_ips";
private static Map<String, String> componentToClusterInfoKeyMap =
- new HashMap<String, String>();
+ new HashMap<>();
private static Map<String, String> decommissionedToClusterInfoKeyMap =
- new HashMap<String, String>();
+ new HashMap<>();
private volatile static Gson gson;
@Inject
@@ -96,7 +96,7 @@ public class StageUtils {
@Inject
private static Configuration configuration;
-
+
@Inject
public StageUtils(StageFactory stageFactory) {
StageUtils.stageFactory = stageFactory;
@@ -216,32 +216,32 @@ public class StageUtils {
ExecutionCommand execCmd = s.getExecutionCommandWrapper(hostname, "NAMENODE").getExecutionCommand();
execCmd.setRequestAndStage(s.getRequestId(), s.getStageId());
- List<String> slaveHostList = new ArrayList<String>();
+ List<String> slaveHostList = new ArrayList<>();
slaveHostList.add(hostname);
slaveHostList.add("host2");
- Map<String, String> hdfsSite = new TreeMap<String, String>();
+ Map<String, String> hdfsSite = new TreeMap<>();
hdfsSite.put("dfs.block.size", "2560000000");
Map<String, Map<String, String>> configurations =
- new TreeMap<String, Map<String, String>>();
+ new TreeMap<>();
configurations.put("hdfs-site", hdfsSite);
execCmd.setConfigurations(configurations);
Map<String, Map<String, Map<String, String>>> configurationAttributes =
- new TreeMap<String, Map<String, Map<String, String>>>();
- Map<String, Map<String, String>> hdfsSiteAttributes = new TreeMap<String, Map<String, String>>();
- Map<String, String> finalAttribute = new TreeMap<String, String>();
+ new TreeMap<>();
+ Map<String, Map<String, String>> hdfsSiteAttributes = new TreeMap<>();
+ Map<String, String> finalAttribute = new TreeMap<>();
finalAttribute.put("dfs.block.size", "true");
hdfsSiteAttributes.put("final", finalAttribute);
configurationAttributes.put("hdfsSite", hdfsSiteAttributes);
execCmd.setConfigurationAttributes(configurationAttributes);
- Map<String, String> params = new TreeMap<String, String>();
+ Map<String, String> params = new TreeMap<>();
params.put("jdklocation", "/x/y/z");
params.put("stack_version", "1.2.0");
params.put("stack_name", "HDP");
execCmd.setHostLevelParams(params);
- Map<String, String> roleParams = new TreeMap<String, String>();
+ Map<String, String> roleParams = new TreeMap<>();
roleParams.put("format", "false");
execCmd.setRoleParams(roleParams);
- Map<String, String> commandParams = new TreeMap<String, String>();
+ Map<String, String> commandParams = new TreeMap<>();
commandParams.put(ExecutionCommand.KeyNames.COMMAND_TIMEOUT, "600");
execCmd.setCommandParams(commandParams);
return s;
@@ -266,10 +266,10 @@ public class StageUtils {
public static Map<String, Set<String>> getClusterHostInfo(Cluster cluster) throws AmbariException {
//Fill hosts and ports lists
- Set<String> hostsSet = new LinkedHashSet<String>();
- List<Integer> portsList = new ArrayList<Integer>();
- List<String> rackList = new ArrayList<String>();
- List<String> ipV4List = new ArrayList<String>();
+ Set<String> hostsSet = new LinkedHashSet<>();
+ List<Integer> portsList = new ArrayList<>();
+ List<String> rackList = new ArrayList<>();
+ List<String> ipV4List = new ArrayList<>();
Collection<Host> allHosts = cluster.getHosts();
for (Host host : allHosts) {
@@ -297,11 +297,11 @@ public class StageUtils {
}
}
- List<String> hostsList = new ArrayList<String>(hostsSet);
- Map<String, String> additionalComponentToClusterInfoKeyMap = new HashMap<String, String>();
+ List<String> hostsList = new ArrayList<>(hostsSet);
+ Map<String, String> additionalComponentToClusterInfoKeyMap = new HashMap<>();
// Fill hosts for services
- Map<String, SortedSet<Integer>> hostRolesInfo = new HashMap<String, SortedSet<Integer>>();
+ Map<String, SortedSet<Integer>> hostRolesInfo = new HashMap<>();
for (Map.Entry<String, Service> serviceEntry : cluster.getServices().entrySet()) {
Service service = serviceEntry.getValue();
@@ -332,7 +332,7 @@ public class StageUtils {
SortedSet<Integer> hostsForComponentsHost = hostRolesInfo.get(roleName);
if (hostsForComponentsHost == null) {
- hostsForComponentsHost = new TreeSet<Integer>();
+ hostsForComponentsHost = new TreeSet<>();
hostRolesInfo.put(roleName, hostsForComponentsHost);
}
@@ -347,7 +347,7 @@ public class StageUtils {
SortedSet<Integer> hostsForComponentsHost = hostRolesInfo.get(decomRoleName);
if (hostsForComponentsHost == null) {
- hostsForComponentsHost = new TreeSet<Integer>();
+ hostsForComponentsHost = new TreeSet<>();
hostRolesInfo.put(decomRoleName, hostsForComponentsHost);
}
@@ -387,7 +387,7 @@ public class StageUtils {
SortedSet<Integer> hostsForComponentsHost = hostRolesInfo.get(roleName);
if (hostsForComponentsHost == null) {
- hostsForComponentsHost = new TreeSet<Integer>();
+ hostsForComponentsHost = new TreeSet<>();
hostRolesInfo.put(roleName, hostsForComponentsHost);
}
@@ -406,10 +406,10 @@ public class StageUtils {
}
}
- Map<String, Set<String>> clusterHostInfo = new HashMap<String, Set<String>>();
+ Map<String, Set<String>> clusterHostInfo = new HashMap<>();
for (Map.Entry<String, SortedSet<Integer>> entry : hostRolesInfo.entrySet()) {
- TreeSet<Integer> sortedSet = new TreeSet<Integer>(entry.getValue());
+ TreeSet<Integer> sortedSet = new TreeSet<>(entry.getValue());
Set<String> replacedRangesSet = replaceRanges(sortedSet);
@@ -429,7 +429,7 @@ public class StageUtils {
* ambari-server hostname.
*/
clusterHostInfo.put(AMBARI_SERVER_HOST, Sets.newHashSet(getHostName()));
-
+
boolean serverUseSsl = configuration.getApiSSLAuthentication();
int port = serverUseSsl ? configuration.getClientSSLApiPort() : configuration.getClientApiPort();
clusterHostInfo.put(AMBARI_SERVER_PORT, Sets.newHashSet(Integer.toString(port)));
@@ -452,7 +452,7 @@ public class StageUtils {
* @throws AmbariException if an index fails to map to a host name
*/
public static Map<String, Set<String>> substituteHostIndexes(Map<String, Set<String>> clusterHostInfo) throws AmbariException {
- Set<String> keysToSkip = new HashSet<String>(Arrays.asList(HOSTS_LIST, PORTS, AMBARI_SERVER_HOST, AMBARI_SERVER_PORT, AMBARI_SERVER_USE_SSL, RACKS, IPV4_ADDRESSES));
+ Set<String> keysToSkip = new HashSet<>(Arrays.asList(HOSTS_LIST, PORTS, AMBARI_SERVER_HOST, AMBARI_SERVER_PORT, AMBARI_SERVER_USE_SSL, RACKS, IPV4_ADDRESSES));
String[] allHosts = {};
if (clusterHostInfo.get(HOSTS_LIST) != null) {
allHosts = clusterHostInfo.get(HOSTS_LIST).toArray(new String[clusterHostInfo.get(HOSTS_LIST).size()]);
@@ -462,7 +462,7 @@ public class StageUtils {
if (keysToSkip.contains(key)) {
continue;
}
- Set<String> hosts = new HashSet<String>();
+ Set<String> hosts = new HashSet<>();
Set<String> currentHostsIndexes = clusterHostInfo.get(key);
if (currentHostsIndexes == null) {
continue;
@@ -497,7 +497,7 @@ public class StageUtils {
return null;
}
- Set<String> rangedSet = new HashSet<String>();
+ Set<String> rangedSet = new HashSet<>();
Integer prevElement = null;
Integer startOfRange = set.first();
@@ -528,7 +528,7 @@ public class StageUtils {
*/
public static <T> Set<String> replaceMappedRanges(List<T> values) {
- Map<T, SortedSet<Integer>> convolutedValues = new HashMap<T, SortedSet<Integer>>();
+ Map<T, SortedSet<Integer>> convolutedValues = new HashMap<>();
int valueIndex = 0;
@@ -537,14 +537,14 @@ public class StageUtils {
SortedSet<Integer> correspValues = convolutedValues.get(value);
if (correspValues == null) {
- correspValues = new TreeSet<Integer>();
+ correspValues = new TreeSet<>();
convolutedValues.put(value, correspValues);
}
correspValues.add(valueIndex);
valueIndex++;
}
- Set<String> result = new HashSet<String>();
+ Set<String> result = new HashSet<>();
for (Entry<T, SortedSet<Integer>> entry : convolutedValues.entrySet()) {
Set<String> replacedRanges = replaceRanges(entry.getValue());
@@ -567,7 +567,7 @@ public class StageUtils {
* @return a set of integers representing the original range
*/
private static Set<Integer> rangeToSet(String range) {
- Set<Integer> indexSet = new HashSet<Integer>();
+ Set<Integer> indexSet = new HashSet<>();
int startIndex;
int endIndex;
if (range.contains("-")) {
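(The StageUtils hunks above end in the host-index range logic: replaceRanges
compresses a sorted set of host indexes into strings such as "0-2" and "5"
for the compact clusterHostInfo payload, and rangeToSet expands them again;
the hunk is truncated mid-method by this mail. Below is a self-contained
sketch of that round trip, with the formatting details assumed for
illustration.)

    import java.util.HashSet;
    import java.util.Set;
    import java.util.SortedSet;
    import java.util.TreeSet;

    // Sketch of the "m-n" range encoding used for clusterHostInfo;
    // details are assumptions, not StageUtils' exact implementation.
    final class HostRangeSketch {
      static Set<String> replaceRanges(SortedSet<Integer> sorted) {
        Set<String> out = new HashSet<>();
        Integer start = null;
        Integer prev = null;
        for (int i : sorted) {
          if (prev != null && i != prev + 1) { // a consecutive run just ended
            out.add(start.equals(prev) ? prev.toString() : start + "-" + prev);
            start = i;
          } else if (prev == null) {
            start = i;
          }
          prev = i;
        }
        if (prev != null) {
          out.add(start.equals(prev) ? prev.toString() : start + "-" + prev);
        }
        return out;
      }

      static Set<Integer> rangeToSet(String range) {
        Set<Integer> out = new HashSet<>();
        int lo, hi;
        if (range.contains("-")) {
          lo = Integer.parseInt(range.split("-")[0]);
          hi = Integer.parseInt(range.split("-")[1]);
        } else {
          lo = hi = Integer.parseInt(range);
        }
        for (int i = lo; i <= hi; i++) {
          out.add(i);
        }
        return out;
      }

      public static void main(String[] args) {
        SortedSet<Integer> idx = new TreeSet<>(java.util.Arrays.asList(0, 1, 2, 5));
        System.out.println(replaceRanges(idx)); // e.g. [0-2, 5]
        System.out.println(rangeToSet("0-2"));  // [0, 1, 2]
      }
    }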
http://git-wip-us.apache.org/repos/asf/ambari/blob/48f7fb22/ambari-server/src/main/python/ambari_server/serverUpgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/serverUpgrade.py b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
index 8263062..07f5097 100644
--- a/ambari-server/src/main/python/ambari_server/serverUpgrade.py
+++ b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
@@ -60,10 +60,6 @@ SCHEMA_UPGRADE_HELPER_CMD = "{0} -cp {1} " + \
"org.apache.ambari.server.upgrade.SchemaUpgradeHelper" + \
" > " + configDefaults.SERVER_OUT_FILE + " 2>&1"
-STACK_UPGRADE_HELPER_CMD = "{0} -cp {1} " + \
- "org.apache.ambari.server.upgrade.StackUpgradeHelper" + \
- " {2} {3} > " + configDefaults.SERVER_OUT_FILE + " 2>&1"
-
SCHEMA_UPGRADE_HELPER_CMD_DEBUG = "{0} " \
"-server -XX:NewRatio=2 " \
"-XX:+UseConcMarkSweepGC " + \
@@ -77,52 +73,6 @@ SCHEMA_UPGRADE_DEBUG = False
SUSPEND_START_MODE = False
-#
-# Stack upgrade
-#
-
-def upgrade_stack(args):
- logger.info("Upgrade stack.")
- if not is_root():
- err = 'Ambari-server upgradestack should be run with ' \
- 'root-level privileges'
- raise FatalException(4, err)
-
- check_database_name_property()
-
- try:
- stack_id = args[1]
- except IndexError:
- #stack_id is mandatory
- raise FatalException(1, "Invalid number of stack upgrade arguments")
-
- try:
- repo_url = args[2]
- except IndexError:
- repo_url = None
-
- try:
- repo_url_os = args[3]
- except IndexError:
- repo_url_os = None
-
- parser = optparse.OptionParser()
- parser.add_option("-d", type="int", dest="database_index")
-
- db = get_ambari_properties()[JDBC_DATABASE_PROPERTY]
-
- idx = LINUX_DBMS_KEYS_LIST.index(db)
-
- (options, opt_args) = parser.parse_args(["-d {0}".format(idx)])
-
- stack_name, stack_version = stack_id.split(STACK_NAME_VER_SEP)
- retcode = run_stack_upgrade(options, stack_name, stack_version, repo_url, repo_url_os)
-
- if not retcode == 0:
- raise FatalException(retcode, 'Stack upgrade failed.')
-
- return retcode
-
def load_stack_values(version, filename):
import xml.etree.ElementTree as ET
values = {}
@@ -144,51 +94,6 @@ def load_stack_values(version, filename):
return values
-def run_stack_upgrade(args, stackName, stackVersion, repo_url, repo_url_os):
- jdk_path = get_java_exe_path()
- if jdk_path is None:
- print_error_msg("No JDK found, please run the \"setup\" "
- "command to install a JDK automatically or install any "
- "JDK manually to " + configDefaults.JDK_INSTALL_DIR)
- return 1
- stackId = {}
- stackId[stackName] = stackVersion
- if repo_url is not None:
- stackId['repo_url'] = repo_url
- if repo_url_os is not None:
- stackId['repo_url_os'] = repo_url_os
-
- serverClassPath = ServerClassPath(get_ambari_properties(), args)
- command = STACK_UPGRADE_HELPER_CMD.format(jdk_path, serverClassPath.get_full_ambari_classpath_escaped_for_shell(),
- "updateStackId",
- "'" + json.dumps(stackId) + "'")
- (retcode, stdout, stderr) = run_os_command(command)
- print_info_msg("Return code from stack upgrade command, retcode = {0}".format(str(retcode)))
- if retcode > 0:
- print_error_msg("Error executing stack upgrade, please check the server logs.")
- return retcode
-
-def run_metainfo_upgrade(args, keyValueMap=None):
- jdk_path = get_java_exe_path()
- if jdk_path is None:
- print_error_msg("No JDK found, please run the \"setup\" "
- "command to install a JDK automatically or install any "
- "JDK manually to " + configDefaults.JDK_INSTALL_DIR)
-
- retcode = 1
- if keyValueMap:
- serverClassPath = ServerClassPath(get_ambari_properties(), args)
- command = STACK_UPGRADE_HELPER_CMD.format(jdk_path, serverClassPath.get_full_ambari_classpath_escaped_for_shell(),
- 'updateMetaInfo',
- "'" + json.dumps(keyValueMap) + "'")
- (retcode, stdout, stderr) = run_os_command(command)
- print_info_msg("Return code from stack upgrade command, retcode = {0}".format(str(retcode)))
- if retcode > 0:
- print_error_msg("Error executing metainfo upgrade, please check the server logs.")
-
- return retcode
-
-
#
# Repo upgrade
#
@@ -203,48 +108,6 @@ def change_objects_owner(args):
dbms.change_db_files_owner()
-def upgrade_local_repo(args):
- properties = get_ambari_properties()
- if properties == -1:
- print_error_msg("Error getting ambari properties")
- return -1
-
- stack_location = get_stack_location(properties)
- stack_root_local = os.path.join(stack_location, "HDPLocal")
- if not os.path.exists(stack_root_local):
- print_info_msg("HDPLocal stack directory does not exist, skipping")
- return
-
- stack_root = os.path.join(stack_location, "HDP")
- if not os.path.exists(stack_root):
- print_info_msg("HDP stack directory does not exist, skipping")
- return
-
- for stack_version_local in os.listdir(stack_root_local):
- repo_file_local = os.path.join(stack_root_local, stack_version_local, "repos", "repoinfo.xml.rpmsave")
- if not os.path.exists(repo_file_local):
- repo_file_local = os.path.join(stack_root_local, stack_version_local, "repos", "repoinfo.xml")
-
- repo_file = os.path.join(stack_root, stack_version_local, "repos", "repoinfo.xml")
-
- print_info_msg("Local repo file: {0}".format(repo_file_local))
- print_info_msg("Repo file: {0}".format(repo_file_local))
-
- metainfo_update_items = {}
-
- if os.path.exists(repo_file_local) and os.path.exists(repo_file):
- local_values = load_stack_values(stack_version_local, repo_file_local)
- repo_values = load_stack_values(stack_version_local, repo_file)
- for k, v in local_values.iteritems():
- if repo_values.has_key(k):
- local_url = local_values[k]
- repo_url = repo_values[k]
- if repo_url != local_url:
- metainfo_update_items[k] = local_url
-
- run_metainfo_upgrade(args, metainfo_update_items)
-
-#
# Schema upgrade
#
@@ -384,9 +247,6 @@ def upgrade(args):
else:
adjust_directory_permissions(user)
- # local repo
- upgrade_local_repo(args)
-
# create jdbc symlinks if jdbc drivers are available in resources
check_jdbc_drivers(args)
http://git-wip-us.apache.org/repos/asf/ambari/blob/48f7fb22/ambari-server/src/main/resources/META-INF/persistence.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/META-INF/persistence.xml b/ambari-server/src/main/resources/META-INF/persistence.xml
index 0375650..60c1f11 100644
--- a/ambari-server/src/main/resources/META-INF/persistence.xml
+++ b/ambari-server/src/main/resources/META-INF/persistence.xml
@@ -28,7 +28,6 @@
<class>org.apache.ambari.server.orm.entities.ClusterEntity</class>
<class>org.apache.ambari.server.orm.entities.ClusterServiceEntity</class>
<class>org.apache.ambari.server.orm.entities.ClusterStateEntity</class>
- <class>org.apache.ambari.server.orm.entities.ClusterVersionEntity</class>
<class>org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity</class>
<class>org.apache.ambari.server.orm.entities.ConfigGroupEntity</class>
<class>org.apache.ambari.server.orm.entities.ConfigGroupHostMappingEntity</class>