Posted to commits@ambari.apache.org by mp...@apache.org on 2014/02/21 20:03:59 UTC
[3/4] AMBARI-4716. Run Ambari Server Upgrade via code rather than
DDL/DML. (mpapirkovskyy)
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeHelper.java
new file mode 100644
index 0000000..b9fec20
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeHelper.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.upgrade;
+
+import com.google.gson.Gson;
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
+import com.google.inject.persist.Transactional;
+import org.apache.ambari.server.controller.ControllerModule;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.MetainfoDAO;
+import org.apache.ambari.server.orm.entities.MetainfoEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.InputMismatchException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+public class StackUpgradeHelper {
+ private static final Logger LOG = LoggerFactory.getLogger
+ (StackUpgradeHelper.class);
+
+ private static final String STACK_ID_UPDATE_ACTION = "updateStackId";
+ private static final String METAINFO_UPDATE_ACTION = "updateMetaInfo";
+ private static final String STACK_ID_STACK_NAME_KEY = "stackName";
+
+ @Inject
+ private DBAccessor dbAccessor;
+ @Inject
+ private PersistService persistService;
+ @Inject
+ private MetainfoDAO metainfoDAO;
+ @Inject
+ private StackUpgradeUtil stackUpgradeUtil;
+
+ private void startPersistenceService() {
+ persistService.start();
+ }
+
+ private void stopPersistenceService() {
+ persistService.stop();
+ }
+
+ /**
+ * Add or update key-value pairs in the metainfo table.
+ * @param data key-value pairs to persist
+ * @throws SQLException
+ */
+ @Transactional
+ private void updateMetaInfo(Map<String, String> data) throws SQLException {
+ if (data != null && !data.isEmpty()) {
+ for (Map.Entry<String, String> entry : data.entrySet()) {
+ MetainfoEntity metainfoEntity = metainfoDAO.findByKey(entry.getKey());
+ if (metainfoEntity != null) {
+ metainfoEntity.setMetainfoName(entry.getKey());
+ metainfoEntity.setMetainfoValue(entry.getValue());
+ metainfoDAO.merge(metainfoEntity);
+ } else {
+ metainfoEntity = new MetainfoEntity();
+ metainfoEntity.setMetainfoName(entry.getKey());
+ metainfoEntity.setMetainfoValue(entry.getValue());
+ metainfoDAO.create(metainfoEntity);
+ }
+ }
+ }
+ }
+
+ /**
+ * Change the stack id in the Ambari DB.
+ * @param stackId single-entry map of stack name to stack version
+ * @throws SQLException
+ */
+ private void updateStackVersion(Map<String, String> stackId) throws SQLException {
+ if (stackId == null || stackId.isEmpty()) {
+ throw new IllegalArgumentException("Empty stack id. " + stackId);
+ }
+ Iterator<Map.Entry<String, String>> stackIdEntry = stackId.entrySet().iterator();
+ Map.Entry<String, String> stackEntry = stackIdEntry.next();
+
+ String stackName = stackEntry.getKey();
+ String stackVersion = stackEntry.getValue();
+
+ LOG.info("Updating stack id, stackName = " + stackName + ", " +
+ "stackVersion = "+ stackVersion);
+
+ stackUpgradeUtil.updateStackDetails(stackName, stackVersion);
+
+ dbAccessor.updateTable("hostcomponentstate", "current_state", "INSTALLED", "where current_state = 'UPGRADING'");
+ }
+
+ private List<String> getValidActions() {
+ return new ArrayList<String>() {{
+ add(STACK_ID_UPDATE_ACTION);
+ add(METAINFO_UPDATE_ACTION);
+ }};
+ }
+
+ /**
+ * Perform the changes needed to support a stack upgrade.
+ * @param args the action name followed by a simple key-value JSON map
+ */
+ public static void main(String[] args) {
+ try {
+ if (args.length < 2) {
+ throw new InputMismatchException("Need to provide an action and " +
+ "a key-value JSON map.");
+ }
+
+ String action = args[0];
+ String valueMap = args[1];
+
+ Injector injector = Guice.createInjector(new ControllerModule());
+ StackUpgradeHelper stackUpgradeHelper = injector.getInstance(StackUpgradeHelper.class);
+ Gson gson = injector.getInstance(Gson.class);
+
+ if (!stackUpgradeHelper.getValidActions().contains(action)) {
+ throw new IllegalArgumentException("Unsupported action. Allowed " +
+ "actions: " + stackUpgradeHelper.getValidActions());
+ }
+
+ stackUpgradeHelper.startPersistenceService();
+ Map values = gson.fromJson(valueMap, Map.class);
+
+ if (action.equals(STACK_ID_UPDATE_ACTION)) {
+ stackUpgradeHelper.updateStackVersion(values);
+ } else if (action.equals(METAINFO_UPDATE_ACTION)) {
+
+ stackUpgradeHelper.updateMetaInfo(values);
+ }
+
+ stackUpgradeHelper.stopPersistenceService();
+
+ } catch (Throwable t) {
+ LOG.error("Caught exception on upgrade. Exiting...", t);
+ System.exit(1);
+ }
+ }
+}
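For reference, the ambari-server.py changes later in this commit drive this entry point as "java -cp <classpath> org.apache.ambari.server.upgrade.StackUpgradeHelper <action> <json-map>": "updateStackId" expects a single {"stackName": "stackVersion"} entry, while "updateMetaInfo" accepts arbitrary key-value pairs. A round-trip sketch of the JSON argument appears after the ambari-server.py diff below.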
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java
new file mode 100644
index 0000000..75189cc
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.upgrade;
+
+import com.google.gson.Gson;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.persist.Transactional;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.ClusterStateDAO;
+import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
+import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.ServiceDesiredStateDAO;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterStateEntity;
+import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
+import org.apache.ambari.server.state.StackId;
+import java.util.ArrayList;
+import java.util.List;
+
+public class StackUpgradeUtil {
+ @Inject
+ private Gson gson;
+ @Inject
+ private Injector injector;
+
+ private String getStackIdString(String originalStackId, String stackName,
+ String stackVersion) {
+ if (stackVersion == null) {
+ stackVersion = gson.fromJson(originalStackId, StackId.class).getStackVersion();
+ }
+
+ return String.format(
+ "{\"stackName\":\"%s\",\"stackVersion\":\"%s\"}",
+ stackName,
+ stackVersion
+ );
+ }
+
+ @Transactional
+ public void updateStackDetails(String stackName, String stackVersion) {
+ ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
+ List<Long> clusterIds = new ArrayList<Long>();
+
+ List<ClusterEntity> clusterEntities = clusterDAO.findAll();
+ if (clusterEntities != null && !clusterEntities.isEmpty()) {
+ for (ClusterEntity entity : clusterEntities) {
+ clusterIds.add(entity.getClusterId());
+ String stackIdString = entity.getDesiredStackVersion();
+ entity.setDesiredStackVersion(getStackIdString(stackIdString,
+ stackName, stackVersion));
+ clusterDAO.merge(entity);
+ }
+ }
+
+ ClusterStateDAO clusterStateDAO = injector.getInstance(ClusterStateDAO.class);
+
+ for (Long clusterId : clusterIds) {
+ ClusterStateEntity clusterStateEntity = clusterStateDAO.findByPK(clusterId);
+ String currentStackVersion = clusterStateEntity.getCurrentStackVersion();
+ clusterStateEntity.setCurrentStackVersion(getStackIdString
+ (currentStackVersion, stackName, stackVersion));
+ clusterStateDAO.merge(clusterStateEntity);
+ }
+
+ HostComponentStateDAO hostComponentStateDAO = injector.getInstance
+ (HostComponentStateDAO.class);
+ List<HostComponentStateEntity> hcEntities = hostComponentStateDAO.findAll();
+
+ if (hcEntities != null) {
+ for (HostComponentStateEntity hc : hcEntities) {
+ String currentStackVersion = hc.getCurrentStackVersion();
+ hc.setCurrentStackVersion(getStackIdString(currentStackVersion,
+ stackName, stackVersion));
+ hostComponentStateDAO.merge(hc);
+ }
+ }
+
+ HostComponentDesiredStateDAO hostComponentDesiredStateDAO =
+ injector.getInstance(HostComponentDesiredStateDAO.class);
+
+ List<HostComponentDesiredStateEntity> hcdEntities = hostComponentDesiredStateDAO.findAll();
+
+ if (hcdEntities != null) {
+ for (HostComponentDesiredStateEntity hcd : hcdEntities) {
+ String desiredStackVersion = hcd.getDesiredStackVersion();
+ hcd.setDesiredStackVersion(getStackIdString(desiredStackVersion,
+ stackName, stackVersion));
+ hostComponentDesiredStateDAO.merge(hcd);
+ }
+ }
+
+ ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO =
+ injector.getInstance(ServiceComponentDesiredStateDAO.class);
+
+ List<ServiceComponentDesiredStateEntity> scdEntities =
+ serviceComponentDesiredStateDAO.findAll();
+
+ if (scdEntities != null) {
+ for (ServiceComponentDesiredStateEntity scd : scdEntities) {
+ String desiredStackVersion = scd.getDesiredStackVersion();
+ scd.setDesiredStackVersion(getStackIdString(desiredStackVersion,
+ stackName, stackVersion));
+ serviceComponentDesiredStateDAO.merge(scd);
+ }
+ }
+
+ ServiceDesiredStateDAO serviceDesiredStateDAO = injector.getInstance(ServiceDesiredStateDAO.class);
+
+ List<ServiceDesiredStateEntity> sdEntities = serviceDesiredStateDAO.findAll();
+
+ if (sdEntities != null) {
+ for (ServiceDesiredStateEntity sd : sdEntities) {
+ String desiredStackVersion = sd.getDesiredStackVersion();
+ sd.setDesiredStackVersion(getStackIdString(desiredStackVersion,
+ stackName, stackVersion));
+ serviceDesiredStateDAO.merge(sd);
+ }
+ }
+
+ }
+}
\ No newline at end of file
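Because the stack id is persisted as a small JSON document, the effect of updateStackDetails is easiest to see on one value. A minimal standalone sketch, not part of the patch (the version "1.3.2" is illustrative; the HDPLocal-to-HDP rename mirrors the UpgradeCatalog150 DML below, which calls updateStackDetails("HDP", null) with a null version so the original version is kept):

  import com.google.gson.Gson;
  import org.apache.ambari.server.state.StackId;

  public class StackIdRewriteSketch {
    public static void main(String[] args) {
      // Hypothetical persisted value, as stored per cluster/component.
      String original = "{\"stackName\":\"HDPLocal\",\"stackVersion\":\"1.3.2\"}";
      // With a null stackVersion argument, getStackIdString keeps the version
      // parsed from the original JSON and only swaps the stack name.
      String version = new Gson().fromJson(original, StackId.class).getStackVersion();
      System.out.println(String.format(
          "{\"stackName\":\"%s\",\"stackVersion\":\"%s\"}", "HDP", version));
      // Prints {"stackName":"HDP","stackVersion":"1.3.2"}
    }
  }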
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog.java
new file mode 100644
index 0000000..07384cc
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.upgrade;
+
+import org.apache.ambari.server.AmbariException;
+
+import java.sql.SQLException;
+
+/**
+ * Interface for upgrading Ambari DB
+ */
+public interface UpgradeCatalog {
+ /**
+ * Run the upgrade scripts for upgrading Ambari Server from the current
+ * version to the new version.
+ * @throws AmbariException
+ * @throws SQLException
+ */
+ public void upgradeSchema() throws AmbariException, SQLException;
+
+ /**
+ * Start persistence service and perform updates as necessary
+ * @throws AmbariException
+ * @throws SQLException
+ */
+ public void executeDMLUpdates() throws AmbariException, SQLException;
+
+ /**
+ * Return the version that will be upgraded to.
+ * @return the target version
+ */
+ public String getTargetVersion();
+
+ /**
+ * Return the latest source version that can be upgraded from,
+ * or null if no UpgradeCatalog exists before this one.
+ *
+ * @return the source version, or null by default
+ */
+ public String getSourceVersion();
+}
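For orientation, a minimal sketch of a concrete catalog against this interface; the class name and version strings are hypothetical, and it assumes AbstractUpgradeCatalog supplies the Injector constructor and declares executeDDLUpdates, as UpgradeCatalog150 below suggests:

  import com.google.inject.Inject;
  import com.google.inject.Injector;
  import org.apache.ambari.server.AmbariException;
  import java.sql.SQLException;

  public class UpgradeCatalogSketch extends AbstractUpgradeCatalog {
    @Inject
    public UpgradeCatalogSketch(Injector injector) {
      super(injector);
    }

    @Override
    public void executeDDLUpdates() throws AmbariException, SQLException {
      // schema changes via dbAccessor, as UpgradeCatalog150 does below
    }

    @Override
    public void executeDMLUpdates() throws AmbariException, SQLException {
      // data changes via DAOs and the EntityManager
    }

    @Override
    public String getSourceVersion() {
      return "1.5.0"; // chains off the catalog below; null marks the first catalog
    }

    @Override
    public String getTargetVersion() {
      return "1.6.0"; // hypothetical next version
    }
  }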
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
new file mode 100644
index 0000000..86bbef3
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
@@ -0,0 +1,487 @@
+package org.apache.ambari.server.upgrade;
+
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.ClusterStateDAO;
+import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
+import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
+import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.ServiceDesiredStateDAO;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterStateEntity;
+import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
+import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
+import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
+import org.apache.ambari.server.state.State;
+import org.eclipse.persistence.jpa.JpaEntityManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
+import javax.persistence.criteria.CriteriaBuilder;
+import javax.persistence.criteria.CriteriaQuery;
+import javax.persistence.criteria.Expression;
+import javax.persistence.criteria.Predicate;
+import javax.persistence.criteria.Root;
+import java.io.File;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class UpgradeCatalog150 extends AbstractUpgradeCatalog {
+ private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog150.class);
+ private static final String quartzScriptFilePattern = "quartz.%s.sql";
+ private Injector injector;
+
+ @Inject
+ public UpgradeCatalog150(Injector injector) {
+ super(injector);
+ this.injector = injector;
+ }
+
+ @Override
+ public void executeDDLUpdates() throws AmbariException, SQLException {
+ LOG.debug("Upgrading schema...");
+
+ List<DBColumnInfo> columns = new ArrayList<DBColumnInfo>();
+
+ // ========================================================================
+ // Create tables
+
+ // ClusterConfigMapping
+ columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("type_name", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("version_tag", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("create_timestamp", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("selected", Integer.class, 0, null, false));
+ columns.add(new DBColumnInfo("user_name", String.class, 255, "_db", false));
+
+ dbAccessor.createTable("clusterconfigmapping", columns, "cluster_id", "type_name", "create_timestamp");
+
+ // Request
+ columns.clear();
+ columns.add(new DBColumnInfo("request_id", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, true));
+ columns.add(new DBColumnInfo("request_schedule_id", Long.class, null, null, true));
+ columns.add(new DBColumnInfo("command_name", String.class, 255, null, true));
+ columns.add(new DBColumnInfo("create_time", Long.class, null, null, true));
+ columns.add(new DBColumnInfo("end_time", Long.class, null, null, true));
+ columns.add(new DBColumnInfo("inputs", byte[].class, null, null, true));
+ columns.add(new DBColumnInfo("request_context", String.class, 255, null, true));
+ columns.add(new DBColumnInfo("request_type", String.class, 255, null, true));
+ columns.add(new DBColumnInfo("start_time", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("target_component", String.class, 255, null, true));
+ columns.add(new DBColumnInfo("target_hosts", String.class, null, null, false));
+ columns.add(new DBColumnInfo("target_service", String .class, 255, null, true));
+
+ dbAccessor.createTable("request", columns, "request_id");
+
+ // RequestSchedule
+ columns.clear();
+ columns.add(new DBColumnInfo("schedule_id", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("description", String.class, 255, null, true));
+ columns.add(new DBColumnInfo("status", String.class, 255, null, true));
+ columns.add(new DBColumnInfo("batch_separation_seconds", Integer.class, null, null, true));
+ columns.add(new DBColumnInfo("batch_toleration_limit", Integer.class, null, null, true));
+ columns.add(new DBColumnInfo("create_user", String.class, 255, null, true));
+ columns.add(new DBColumnInfo("create_timestamp", Long.class, null, null, true));
+ columns.add(new DBColumnInfo("update_user", String.class, 255, null, true));
+ columns.add(new DBColumnInfo("update_timestamp", Long.class, null, null, true));
+ columns.add(new DBColumnInfo("minutes", String.class, 10, null, true));
+ columns.add(new DBColumnInfo("hours", String.class, 10, null, true));
+ columns.add(new DBColumnInfo("days_of_month", String.class, 10, null, true));
+ columns.add(new DBColumnInfo("month", String.class, 10, null, true));
+ columns.add(new DBColumnInfo("day_of_week", String.class, 10, null, true));
+ columns.add(new DBColumnInfo("yearToSchedule", String.class, 10, null, true));
+ columns.add(new DBColumnInfo("startTime", String.class, 50, null, true));
+ columns.add(new DBColumnInfo("endTime", String.class, 50, null, true));
+ columns.add(new DBColumnInfo("last_execution_status", String.class, 255, null, true));
+
+ dbAccessor.createTable("requestschedule", columns, "schedule_id");
+
+ // RequestScheduleBatchRequest
+ columns.clear();
+ columns.add(new DBColumnInfo("schedule_id", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("batch_id", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("request_id", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("request_type", String.class, 255, null, true));
+ columns.add(new DBColumnInfo("request_uri", String.class, 1024, null, true));
+ columns.add(new DBColumnInfo("request_body", byte[].class, null, null, true));
+ columns.add(new DBColumnInfo("request_status", String.class, 255, null, true));
+ columns.add(new DBColumnInfo("return_code", Integer.class, null, null, true));
+ columns.add(new DBColumnInfo("return_message", String.class, 2000, null, true));
+
+ dbAccessor.createTable("requestschedulebatchrequest", columns, "schedule_id", "batch_id");
+
+ // HostConfigMapping
+ columns.clear();
+ columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("host_name", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("type_name", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("version_tag", String.class, 255, null, true));
+ columns.add(new DBColumnInfo("service_name", String.class, 255, null, true));
+ columns.add(new DBColumnInfo("create_timestamp", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("selected", Integer.class, 0, null, false));
+
+ dbAccessor.createTable("hostconfigmapping", columns, "cluster_id", "host_name", "type_name", "create_timestamp");
+
+ // Sequences
+ columns.clear();
+ columns.add(new DBColumnInfo("sequence_name", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("value", Long.class, null, null, false));
+
+ dbAccessor.createTable("ambari_sequences", columns, "sequence_name");
+
+ // Metainfo
+
+ columns.clear();
+ columns.add(new DBColumnInfo("metainfo_key", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("metainfo_value", String.class, 255, null, false));
+
+ dbAccessor.createTable("metainfo", columns, "metainfo_key");
+
+ // ConfigGroup
+ columns.clear();
+ columns.add(new DBColumnInfo("group_id", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("group_name", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("tag", String.class, 1024, null, false));
+ columns.add(new DBColumnInfo("description", String.class, 1024, null, true));
+ columns.add(new DBColumnInfo("create_timestamp", Long.class, null, null, false));
+
+ dbAccessor.createTable("configgroup", columns, "group_id");
+
+ // ConfigGroupClusterConfigMapping
+ columns.clear();
+ columns.add(new DBColumnInfo("config_group_id", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("config_type", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("version_tag", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("user_name", String.class, 255, "_db", true));
+ columns.add(new DBColumnInfo("create_timestamp", Long.class, null, null, false));
+
+ dbAccessor.createTable("confgroupclusterconfigmapping", columns, "config_group_id", "cluster_id", "config_type");
+
+ // ConfigGroupHostMapping
+ columns.clear();
+ columns.add(new DBColumnInfo("config_group_id", Long.class, null, null, false));
+ columns.add(new DBColumnInfo("host_name", String.class, 255, null, false));
+
+ dbAccessor.createTable("configgrouphostmapping", columns, "config_group_id", "host_name");
+
+ // Blueprint
+ columns.clear();
+ columns.add(new DBColumnInfo("blueprint_name", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("stack_name", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("stack_version", String.class, 255, null, false));
+
+ dbAccessor.createTable("blueprint", columns, "blueprint_name");
+
+ // HostGroup
+ columns.clear();
+ columns.add(new DBColumnInfo("blueprint_name", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("name", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("cardinality", String.class, 255, null, false));
+
+ dbAccessor.createTable("hostgroup", columns, "blueprint_name", "name");
+
+ // HostGroupComponent
+ columns.clear();
+ columns.add(new DBColumnInfo("blueprint_name", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("hostgroup_name", String.class, 255, null, false));
+ columns.add(new DBColumnInfo("name", String.class, 255, null, false));
+
+ dbAccessor.createTable("hostgroup_component", columns, "blueprint_name", "hostgroup_name", "name");
+
+ createQuartzTables();
+
+ // ========================================================================
+ // Add columns
+
+ dbAccessor.addColumn("hostcomponentdesiredstate", new DBColumnInfo("passive_state", String.class, 32, "ACTIVE", false));
+ dbAccessor.addColumn("servicedesiredstate", new DBColumnInfo("passive_state", String.class, 32, "ACTIVE", false));
+ dbAccessor.addColumn("hoststate", new DBColumnInfo("passive_state", String.class, 512, null, true));
+ dbAccessor.addColumn("hostcomponentdesiredstate", new DBColumnInfo("admin_state", String.class, 32, null, true));
+ dbAccessor.addColumn("hosts", new DBColumnInfo("ph_cpu_count", Integer.class, 32, null, true));
+ dbAccessor.addColumn("clusterstate", new DBColumnInfo("current_stack_version", String.class, 255, null, false));
+ dbAccessor.addColumn("hostconfigmapping", new DBColumnInfo("user_name", String.class, 255, "_db", false));
+ dbAccessor.addColumn("stage", new DBColumnInfo("request_context", String.class, 255, null, true));
+ dbAccessor.addColumn("stage", new DBColumnInfo("cluster_host_info", byte[].class, null, null, true));
+ dbAccessor.addColumn("clusterconfigmapping", new DBColumnInfo("user_name", String.class, 255, "_db", false));
+ dbAccessor.addColumn("host_role_command", new DBColumnInfo("end_time", Long.class, null, null, true));
+ dbAccessor.addColumn("host_role_command", new DBColumnInfo("structured_out", byte[].class, null, null, true));
+ dbAccessor.addColumn("host_role_command", new DBColumnInfo("command_detail", String.class, 255, null, true));
+ dbAccessor.addColumn("host_role_command", new DBColumnInfo("custom_command_name", String.class, 255, null, true));
+
+ // Alter columns
+
+ if (getDbType().equals(Configuration.POSTGRES_DB_NAME)) {
+ try {
+ dbAccessor.executeQuery("ALTER TABLE hostcomponentdesiredconfigmapping rename to hcdesiredconfigmapping;");
+ dbAccessor.executeQuery("ALTER TABLE users ALTER column user_id DROP DEFAULT;");
+ dbAccessor.executeQuery("ALTER TABLE users ALTER column ldap_user TYPE INTEGER USING CASE WHEN ldap_user=true THEN 1 ELSE 0 END;");
+ dbAccessor.executeQuery("ALTER TABLE hosts DROP COLUMN disks_info;");
+ } catch (SQLException e) {
+ LOG.warn("Error encountered while altering schema. ", e);
+ // continue updates
+ }
+ }
+
+ // ========================================================================
+ // Add constraints
+
+ dbAccessor.addFKConstraint("stage", "FK_stage_request_id", "request_id", "request", "request_id", true);
+ dbAccessor.addFKConstraint("request", "FK_request_cluster_id", "cluster_id", "clusters", "cluster_id", true);
+ dbAccessor.addFKConstraint("request", "FK_request_schedule_id", "request_schedule_id", "requestschedule", "schedule_id", true);
+ dbAccessor.addFKConstraint("requestschedulebatchrequest", "FK_requestschedulebatchrequest_schedule_id", "schedule_id", "requestschedule", "schedule_id", true);
+ dbAccessor.addFKConstraint("hostconfigmapping", "FK_hostconfigmapping_cluster_id", "cluster_id", "clusters", "cluster_id", true);
+ dbAccessor.addFKConstraint("hostconfigmapping", "FK_hostconfigmapping_host_name", "host_name", "hosts", "host_name", true);
+ dbAccessor.addFKConstraint("configgroup", "FK_configgroup_cluster_id", "cluster_id", "clusters", "cluster_id", true);
+ dbAccessor.addFKConstraint("confgroupclusterconfigmapping", "FK_cg_cluster_cm_config_tag", new String[] {"version_tag", "config_type", "cluster_id"}, "clusterconfig", new String[] {"version_tag", "type_name", "cluster_id"}, true);
+ dbAccessor.addFKConstraint("confgroupclusterconfigmapping", "FK_cg_cluster_cm_group_id", "config_group_id", "configgroup", "group_id", true);
+ dbAccessor.addFKConstraint("confgrouphostmapping", "FK_cghostm_configgroup_id", "config_group_id", "configgroup", "group_id", true);
+ dbAccessor.addFKConstraint("confgrouphostmapping", "FK_cghostm_host_name", "host_name", "hosts", "host_name", true);
+ dbAccessor.addFKConstraint("clusterconfigmapping", "FK_clustercfgmap_cluster_id", "cluster_id", "clusters", "cluster_id", true);
+
+ // ========================================================================
+ // Finally update schema version
+ updateMetaInfoVersion(getTargetVersion());
+ }
+
+ @Override
+ public void executeDMLUpdates() throws AmbariException, SQLException {
+ // Service Config mapping
+ String tableName = "serviceconfigmapping";
+ String dbType = getDbType();
+
+ EntityManager em = getEntityManagerProvider().get();
+
+ //unable to do via dao, as they were dropped
+ //TODO should we move this to ddl and drop unused tables then?
+ if (dbAccessor.tableExists(tableName)
+ && dbAccessor.tableHasData(tableName)
+ && dbAccessor.tableExists("clusterconfigmapping")) {
+
+ if (dbType.equals(Configuration.POSTGRES_DB_NAME)) {
+ // Service config mapping entity object will be deleted so need to
+ // proceed with executing as query
+
+ dbAccessor.executeQuery(getPostgresServiceConfigMappingQuery());
+
+ dbAccessor.truncateTable(tableName);
+
+ } else {
+ LOG.warn("Unsupported database for service config mapping query. " +
+ "database = " + dbType);
+ }
+ }
+
+
+ // TODO: Convert all possible native queries using Criteria builder
+ // Request Entries
+ tableName = "request";
+ if (dbAccessor.tableExists(tableName) &&
+ !dbAccessor.tableHasData(tableName)) {
+
+ String query;
+ if (dbType.equals(Configuration.POSTGRES_DB_NAME)) {
+ query = getPostgresRequestUpgradeQuery();
+ } else if (dbType.equals(Configuration.ORACLE_DB_NAME)) {
+ query = getOracleRequestUpgradeQuery();
+ } else {
+ query = getMysqlRequestUpgradeQuery();
+ }
+
+ dbAccessor.executeQuery(query);
+ }
+
+ // Sequences
+ if (dbAccessor.tableExists("ambari_sequences")) {
+ if (dbType.equals(Configuration.POSTGRES_DB_NAME)) {
+ try {
+ dbAccessor.executeQuery(getPostgresSequenceUpgradeQuery());
+ // Deletes
+ dbAccessor.dropSequence("host_role_command_task_id_seq");
+ dbAccessor.dropSequence("users_user_id_seq");
+ dbAccessor.dropSequence("clusters_cluster_id_seq");
+ } catch (SQLException sql) {
+ LOG.warn("Sequence update threw exception. ", sql);
+ }
+ }
+ }
+
+ //clear cache due to direct table manipulation
+ ((JpaEntityManager)em.getDelegate()).getServerSession().getIdentityMapAccessor().invalidateAll();
+
+ // Updates
+
+ // HostComponentState
+ CriteriaBuilder cb = em.getCriteriaBuilder();
+ CriteriaQuery<HostComponentStateEntity> c1 = cb.createQuery(HostComponentStateEntity.class);
+ Root<HostComponentStateEntity> hsc = c1.from(HostComponentStateEntity.class);
+ Expression<String> exp = hsc.get("current_state");
+ List<String> statuses = new ArrayList<String>() {{
+ add("STOP_FAILED");
+ add("START_FAILED");
+ }};
+ Predicate predicate = exp.in(statuses);
+ c1.select(hsc).where(predicate);
+
+ TypedQuery<HostComponentStateEntity> q1 = em.createQuery(c1);
+ List<HostComponentStateEntity> r1 = q1.getResultList();
+
+ HostComponentStateDAO hostComponentStateDAO = injector.getInstance(HostComponentStateDAO.class);
+ if (r1 != null && !r1.isEmpty()) {
+ for (HostComponentStateEntity entity : r1) {
+ entity.setCurrentState(State.INSTALLED);
+ hostComponentStateDAO.merge(entity);
+ }
+ }
+
+ // HostRoleCommand
+ CriteriaQuery<HostRoleCommandEntity> c2 = cb.createQuery(HostRoleCommandEntity.class);
+ Root<HostRoleCommandEntity> hrc = c2.from(HostRoleCommandEntity.class);
+ statuses = new ArrayList<String>() {{
+ add("PENDING");
+ add("QUEUED");
+ add("IN_PROGRESS");
+ }};
+ exp = hrc.get("status");
+ predicate = exp.in(statuses);
+ c2.select(hrc).where(predicate);
+
+ TypedQuery<HostRoleCommandEntity> q2 = em.createQuery(c2);
+ List<HostRoleCommandEntity> r2 = q2.getResultList();
+
+ HostRoleCommandDAO hostRoleCommandDAO = injector.getInstance(HostRoleCommandDAO.class);
+ if (r2 != null && !r2.isEmpty()) {
+ for (HostRoleCommandEntity entity : r2) {
+ entity.setStatus(HostRoleStatus.ABORTED);
+ hostRoleCommandDAO.merge(entity);
+ }
+ }
+
+ // Stack version changes from HDPLocal to HDP
+ stackUpgradeUtil.updateStackDetails("HDP", null);
+
+ //create cluster state entities if not present
+ executeInTransaction(new Runnable() {
+ @Override
+ public void run() {
+ ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
+ ClusterStateDAO clusterStateDAO = injector.getInstance(ClusterStateDAO.class);
+ List<ClusterEntity> clusterEntities = clusterDAO.findAll();
+ for (ClusterEntity clusterEntity : clusterEntities) {
+ if (clusterStateDAO.findByPK(clusterEntity.getClusterId()) == null) {
+ ClusterStateEntity clusterStateEntity = new ClusterStateEntity();
+ clusterStateEntity.setClusterEntity(clusterEntity);
+ clusterStateEntity.setCurrentStackVersion(clusterEntity.getDesiredStackVersion());
+
+ clusterStateDAO.create(clusterStateEntity);
+
+ clusterEntity.setClusterStateEntity(clusterStateEntity);
+
+ clusterDAO.merge(clusterEntity);
+ }
+ }
+ }
+ });
+
+ }
+
+ private String getPostgresServiceConfigMappingQuery() {
+ return "INSERT INTO clusterconfigmapping " +
+ "(cluster_id, type_name, version_tag, create_timestamp, selected) " +
+ "(SELECT DISTINCT cluster_id, config_type, config_tag, " +
+ "cast(date_part('epoch', now()) as bigint), 1 " +
+ "FROM serviceconfigmapping scm " +
+ "WHERE timestamp = (SELECT max(timestamp) FROM serviceconfigmapping " +
+ "WHERE cluster_id = scm.cluster_id AND config_type = scm.config_type))";
+ }
+
+ private String getPostgresSequenceUpgradeQuery() {
+ return "INSERT INTO ambari_sequences(sequence_name, \"value\") " +
+ "SELECT 'cluster_id_seq', nextval('clusters_cluster_id_seq') " +
+ "UNION ALL " +
+ "SELECT 'user_id_seq', nextval('users_user_id_seq') " +
+ "UNION ALL " +
+ "SELECT 'host_role_command_id_seq', COALESCE((SELECT max(task_id) FROM host_role_command), 1) + 50 " +
+ "UNION ALL " +
+ "SELECT 'configgroup_id_seq', 1";
+ }
+
+ private String getPostgresRequestUpgradeQuery() {
+ return "insert into request" +
+ "(request_id, cluster_id, request_context, start_time, end_time, create_time) " +
+ "(select distinct s.request_id, s.cluster_id, s.request_context, " +
+ "coalesce (cmd.start_time, -1), coalesce (cmd.end_time, -1), -1 " +
+ "from " +
+ "(select distinct request_id, cluster_id, request_context from stage ) s " +
+ "left join " +
+ "(select request_id, min(start_time) as start_time, max(end_time) " +
+ "as end_time from host_role_command group by request_id) cmd";
+ }
+
+ private String getOracleRequestUpgradeQuery() {
+ return "INSERT INTO request" +
+ "(request_id, cluster_id, request_context, start_time, end_time, create_time) " +
+ "SELECT DISTINCT s.request_id, s.cluster_id, s.request_context, " +
+ "nvl(cmd.start_time, -1), nvl(cmd.end_time, -1), -1" +
+ "FROM " +
+ "(SELECT DISTINCT request_id, cluster_id, request_context FROM stage ) s " +
+ "LEFT JOIN " +
+ "(SELECT request_id, min(start_time) as start_time, max(end_time) " +
+ "as end_time FROM host_role_command GROUP BY request_id) cmd " +
+ "ON s.request_id=cmd.request_id";
+ }
+
+ private String getMysqlRequestUpgradeQuery() {
+ return "insert into request" +
+ "(request_id, cluster_id, request_context, start_time, end_time, create_time) " +
+ "select distinct s.request_id, s.cluster_id, s.request_context, " +
+ "coalesce (cmd.start_time, -1), coalesce (cmd.end_time, -1), -1 " +
+ "from " +
+ "(select distinct request_id, cluster_id, request_context from stage ) s " +
+ "left join " +
+ "(select request_id, min(start_time) as start_time, max(end_time) " +
+ "as end_time from host_role_command group by request_id) cmd " +
+ "on s.request_id=cmd.request_id";
+ }
+
+ private void createQuartzTables() throws SQLException {
+ String dbType = getDbType();
+
+ // Run script to create quartz tables
+ String scriptPath = configuration.getResourceDirPath() +
+ File.separator + "upgrade" + File.separator + "ddl" +
+ File.separator + String.format(quartzScriptFilePattern, dbType);
+
+ try {
+ dbAccessor.executeScript(scriptPath);
+ } catch (IOException e) {
+ LOG.error("Error reading file.", e);
+ }
+
+ // TODO: Verify if this is necessary and possible
+ if (dbType.equals(Configuration.POSTGRES_DB_NAME)) {
+ grantAllPostgresPrivileges();
+ }
+ }
+
+ @Override
+ public String getTargetVersion() {
+ return "1.5.0";
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/utils/DateUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/DateUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/DateUtils.java
index 785f4fd..aeceedf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/DateUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/DateUtils.java
@@ -94,4 +94,17 @@ public class DateUtils {
Date now = new Date();
return time.after(now);
}
+
+ public static void main(String[] args) {
+ String s = "INSERT INTO ambari_sequences(sequence_name, \"value\") " +
+ "SELECT 'cluster_id_seq', nextval('clusters_cluster_id_seq') " +
+ "UNION ALL " +
+ "SELECT 'user_id_seq', nextval('users_user_id_seq') " +
+ "UNION ALL " +
+ "SELECT 'host_role_command_id_seq', COALESCE((SELECT max(task_id) FROM host_role_command), 1) + 50 " +
+ "UNION ALL " +
+ "SELECT 'configgroup_id_seq', 1";
+
+ System.out.println(s);
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/utils/VersionUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/VersionUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/VersionUtils.java
index d3df4e5..9e3c0c8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/VersionUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/VersionUtils.java
@@ -129,4 +129,18 @@ public class VersionUtils {
public static boolean areVersionsEqual(String version1, String version2, boolean allowEmptyVersions) {
return 0 == compareVersions(version1, version2, allowEmptyVersions);
}
+
+ /**
+ * Return N.N.N from N.N.N.xyz
+ * @param version full version string of the form N.N.N.xyz
+ * @return the leading three components, N.N.N
+ */
+ public static String getVersionSubstring(String version) {
+ String[] versionParts = version.split("\\.");
+ if (versionParts.length < 3) {
+ throw new IllegalArgumentException("Invalid version number: " + version);
+ }
+
+ return versionParts[0] + "." + versionParts[1] + "." + versionParts[2];
+ }
}
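A quick usage sketch (the build number "460" is illustrative):

  import org.apache.ambari.server.utils.VersionUtils;

  public class VersionSubstringSketch {
    public static void main(String[] args) {
      // Only the first three version components survive.
      System.out.println(VersionUtils.getVersionSubstring("1.5.0.460")); // 1.5.0
      // Anything with fewer than three parts throws IllegalArgumentException.
    }
  }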
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/python/ambari-server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index e3439e0..931a41a 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -40,6 +40,7 @@ import tempfile
import random
import pwd
from ambari_server.resourceFilesKeeper import ResourceFilesKeeper, KeeperException
+import json
# debug settings
VERBOSE = False
@@ -174,6 +175,17 @@ SECURITY_PROVIDER_KEY_CMD="{0}" + os.sep + "bin" + os.sep + "java -cp {1}" +\
".MasterKeyServiceImpl {3} {4} {5} " +\
"> " + SERVER_OUT_FILE + " 2>&1"
+SCHEMA_UPGRADE_HELPER_CMD="{0}" + os.sep + "bin" + os.sep + "java -cp {1}" +\
+ os.pathsep + "{2} " +\
+ "org.apache.ambari.server.upgrade.SchemaUpgradeHelper" +\
+ " {3}"
+
+STACK_UPGRADE_HELPER_CMD="{0}" + os.sep + "bin" + os.sep + "java -cp {1}" +\
+ os.pathsep + "{2} " +\
+ "org.apache.ambari.server.upgrade.StackUpgradeHelper" +\
+ " {3} {4}"
+
+
SECURITY_KEYS_DIR = "security.server.keys_dir"
SECURITY_MASTER_KEY_LOCATION = "security.master.key.location"
SECURITY_KEY_IS_PERSISTED = "security.master.key.ispersisted"
@@ -249,6 +261,8 @@ PG_HBA_CONF_FILE = os.path.join(PG_HBA_DIR, "pg_hba.conf")
PG_HBA_CONF_FILE_BACKUP = os.path.join(PG_HBA_DIR, "pg_hba_bak.conf.old")
POSTGRESQL_CONF_FILE = os.path.join(PG_HBA_DIR, "postgresql.conf")
+SERVER_VERSION_FILE_PATH = "server.version.file"
+
JDBC_DATABASE_PROPERTY = "server.jdbc.database"
JDBC_HOSTNAME_PROPERTY = "server.jdbc.hostname"
JDBC_PORT_PROPERTY = "server.jdbc.port"
@@ -316,12 +330,6 @@ DATABASE_INIT_SCRIPTS = ['/var/lib/ambari-server/resources/Ambari-DDL-Postgres-R
DATABASE_DROP_SCRIPTS = ['/var/lib/ambari-server/resources/Ambari-DDL-Postgres-REMOTE-DROP.sql',
'/var/lib/ambari-server/resources/Ambari-DDL-Oracle-DROP.sql',
'/var/lib/ambari-server/resources/Ambari-DDL-MySQL-DROP.sql']
-DATABASE_UPGRADE_SCRIPTS = ['/var/lib/ambari-server/resources/upgrade/ddl/Ambari-DDL-Postgres-REMOTE-UPGRADE.sql',
- '/var/lib/ambari-server/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql',
- '/var/lib/ambari-server/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql']
-DATABASE_STACK_UPGRADE_SCRIPTS = ['/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Postgres-REMOTE-UPGRADE_STACK.sql',
- '/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Oracle-UPGRADE_STACK.sql',
- '/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-MySQL-UPGRADE_STACK.sql']
JDBC_PROPERTIES_PREFIX = "server.jdbc.properties."
DATABASE_JDBC_PROPERTIES = [
@@ -387,15 +395,6 @@ DEFAULT_DB_NAME = "ambari"
# stack repo upgrade
STACK_LOCATION_KEY = 'metadata.path'
STACK_LOCATION_DEFAULT = '/var/lib/ambari-server/resources/stacks'
-DATABASE_INSERT_METAINFO_SCRIPTS = ['/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Postgres-INSERT_METAINFO.sql',
- '/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Oracle-INSERT_METAINFO.sql',
- '/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-MySQL-INSERT_METAINFO.sql']
-DATABASE_FIX_LOCAL_REPO_SCRIPTS = ['/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Postgres-FIX_LOCAL_REPO.sql',
- '/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Oracle-FIX_LOCAL_REPO.sql',
- '/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-MySQL-FIX_LOCAL_REPO.sql']
-INSERT_METAINFO_CMD = ['su', 'postgres',
- '--command=psql -f {0} -v metainfo_key="\'{1}\'" -v metainfo_value="\'{2}\'" -v dbname="{3}"']
-FIX_LOCAL_REPO_CMD = ['su', 'postgres', '--command=psql -f {0} -v dbname="{1}"']
#Apache License Header
ASF_LICENSE_HEADER = '''
@@ -1331,51 +1330,6 @@ def get_db_cli_tool(args):
return None
-def remote_stack_upgrade(args, scriptPath, stackId):
- tool = get_db_cli_tool(args)
- if not tool:
- args.warnings.append('{0} not found. Please, run DDL script manually'.format(DATABASE_CLI_TOOLS[DATABASE_INDEX]))
- if VERBOSE:
- print_warning_msg('{0} not found'.format(DATABASE_CLI_TOOLS[DATABASE_INDEX]))
- return -1, "Client wasn't found", "Client wasn't found"
-
- #TODO add support of other databases with scripts
- stack_name, stack_version = stackId.split(STACK_NAME_VER_SEP)
- if args.database == "oracle":
- sid_or_sname = "sid"
- if (hasattr(args, 'sid_or_sname') and args.sid_or_sname == "sname") or \
- (hasattr(args, 'jdbc_url') and args.jdbc_url and re.match(ORACLE_SNAME_PATTERN, args.jdbc_url)):
- print_info_msg("using SERVICE_NAME instead of SID for Oracle")
- sid_or_sname = "service_name"
-
- retcode, out, err = run_in_shell('{0} {1}'.format(tool, ORACLE_UPGRADE_STACK_ARGS.format(
- args.database_username,
- args.database_password,
- args.database_host,
- args.database_port,
- args.database_name,
- scriptPath,
- sid_or_sname,
- stack_name,
- stack_version
- )))
- return retcode, out, err
- elif args.database == "mysql":
- retcode, out, err = run_in_shell('{0} {1}'.format(tool, MYSQL_UPGRADE_STACK_ARGS.format(
- args.database_host,
- args.database_port,
- args.database_username,
- args.database_password,
- args.database_name,
- scriptPath,
- stack_name,
- stack_version
- )))
- return retcode, out, err
-
- return -2, "Wrong database", "Wrong database"
- pass
-
#execute SQL script on remote database
def execute_remote_script(args, scriptPath):
tool = get_db_cli_tool(args)
@@ -1427,136 +1381,6 @@ def execute_remote_script(args, scriptPath):
return -2, "Wrong database", "Wrong database"
-def prepare_stack_upgrade_command(args, stackId):
- db_index = DATABASE_NAMES.index(args.database)
- tool = DATABASE_CLI_TOOLS_DESC[db_index]
-
- scriptPath = DATABASE_STACK_UPGRADE_SCRIPTS[db_index]
-
- stack_name, stack_version = stackId.split(STACK_NAME_VER_SEP)
- if args.database == "oracle":
- sid_or_sname = "sid"
- if (hasattr(args, 'sid_or_sname') and args.sid_or_sname == "sname") or \
- (hasattr(args, 'jdbc_url') and args.jdbc_url and re.match(ORACLE_SNAME_PATTERN, args.jdbc_url)):
- print_info_msg("using SERVICE_NAME instead of SID for Oracle")
- sid_or_sname = "service_name"
-
- command = '{0} {1}'.format(tool, ORACLE_UPGRADE_STACK_ARGS.format(
- args.database_username,
- args.database_password,
- args.database_host,
- args.database_port,
- args.database_name,
- scriptPath,
- sid_or_sname,
- stack_name,
- stack_version
- )).strip()
- return command
- elif args.database == "mysql":
- command = '{0} {1}'.format(tool, MYSQL_UPGRADE_STACK_ARGS.format(
- args.database_host,
- args.database_port,
- args.database_username,
- args.database_password,
- args.database_name,
- scriptPath,
- stack_name,
- stack_version
- )).strip()
- return command
- pass
-
-
-def prepare_schema_upgrade_command(args):
- db_index = DATABASE_NAMES.index(args.database)
- tool = DATABASE_CLI_TOOLS_DESC[db_index]
-
- scriptPath = DATABASE_UPGRADE_SCRIPTS[db_index]
-
- if args.database == "postgres":
- os.environ["PGPASSWORD"] = args.database_password
- command = '{0} {1}'.format(tool, POSTGRES_EXEC_ARGS.format(
- args.database_host,
- args.database_port,
- args.database_name,
- args.database_username,
- scriptPath
- )).strip()
- return command
- elif args.database == "oracle":
- sid_or_sname = "sid"
- if (hasattr(args, 'sid_or_sname') and args.sid_or_sname == "sname") or \
- (hasattr(args, 'jdbc_url') and args.jdbc_url and re.match(ORACLE_SNAME_PATTERN, args.jdbc_url)):
- print_info_msg("using SERVICE_NAME instead of SID for Oracle")
- sid_or_sname = "service_name"
-
- command = '{0} {1}'.format(tool, ORACLE_EXEC_ARGS.format(
- args.database_username,
- args.database_password,
- args.database_host,
- args.database_port,
- args.database_name,
- scriptPath,
- sid_or_sname
- )).strip()
-
- return command
- elif args.database == "mysql":
- MYSQL_EXEC_ARGS = MYSQL_EXEC_ARGS_WO_USER_VARS if MYSQL_INIT_SCRIPT == scriptPath else MYSQL_EXEC_ARGS_WITH_USER_VARS
- command = '{0} {1}'.format(tool, MYSQL_EXEC_ARGS.format(
- args.database_host,
- args.database_port,
- args.database_username,
- args.database_password,
- args.database_name,
- scriptPath
- )).strip()
- return command
- pass
-
-def prepare_local_repo_upgrade_commands(args, dbkey, dbvalue):
- db_index = DATABASE_NAMES.index(args.database)
- tool = DATABASE_CLI_TOOLS_DESC[db_index]
-
- scriptPath = DATABASE_INSERT_METAINFO_SCRIPTS[db_index]
-
- command_list = []
-
- if args.database == "oracle":
- sid_or_sname = "sid"
- if (hasattr(args, 'sid_or_sname') and args.sid_or_sname == "sname") or \
- (hasattr(args, 'jdbc_url') and args.jdbc_url and re.match(ORACLE_SNAME_PATTERN, args.jdbc_url)):
- print_info_msg("using SERVICE_NAME instead of SID for Oracle")
- sid_or_sname = "service_name"
-
- command_list.append('{0} {1}'.format(tool, ORACLE_UPGRADE_STACK_ARGS.format(
- args.database_username,
- args.database_password,
- args.database_host,
- args.database_port,
- args.database_name,
- scriptPath,
- sid_or_sname,
- dbkey,
- dbvalue
- )).strip())
-
- command_list.append('{0} {1}'.format(tool, ORACLE_UPGRADE_STACK_ARGS.format(
- args.database_username,
- args.database_password,
- args.database_host,
- args.database_port,
- args.database_name,
- DATABASE_FIX_LOCAL_REPO_SCRIPTS[db_index],
- sid_or_sname,
- '',
- ''
- )).strip())
-
-
- return command_list
-
def configure_database_password(showDefault=True):
passwordDefault = PG_DEFAULT_PASSWORD
if showDefault:
@@ -1686,6 +1510,7 @@ def parse_properties_file(args):
print_error_msg ("Error getting ambari properties")
return -1
+ args.server_version_file_path = properties[SERVER_VERSION_FILE_PATH]
args.persistence_type = properties[PERSISTENCE_TYPE_PROPERTY]
args.jdbc_url = properties[JDBC_URL_PROPERTY]
@@ -2661,43 +2486,13 @@ def upgrade_stack(args, stack_id):
raise FatalException(4, err)
check_database_name_property()
- parse_properties_file(args)
- if args.persistence_type == "remote":
- client_desc = DATABASE_NAMES[DATABASE_INDEX] + ' ' + DATABASE_CLI_TOOLS_DESC[DATABASE_INDEX]
- client_usage_cmd = DATABASE_CLI_TOOLS_USAGE[DATABASE_INDEX].format(DATABASE_STACK_UPGRADE_SCRIPTS[DATABASE_INDEX], args.database_username,
- BLIND_PASSWORD, args.database_name)
- #TODO temporarty code
- if not args.database in ["oracle", "mysql"]:
- raise FatalException(-20, "Upgrade for remote database only supports Oracle.")
-
- if get_db_cli_tool(args):
- retcode, out, err = remote_stack_upgrade(args, DATABASE_STACK_UPGRADE_SCRIPTS[DATABASE_INDEX], stack_id)
- if not retcode == 0:
- raise NonFatalException(err)
+ stack_name, stack_version = stack_id.split(STACK_NAME_VER_SEP)
+ retcode = run_stack_upgrade(stack_name, stack_version)
- else:
- command = prepare_stack_upgrade_command(args, stack_id)
- err = 'Cannot find ' + client_desc + ' client in the path to upgrade the Ambari ' + \
- 'Server stack. To upgrade stack of Ambari Server ' + \
- 'you must run the following command:' + \
- os.linesep + command
- args.warnings.append(err)
+ if not retcode == 0:
+ raise FatalException(retcode, 'Stack upgrade failed.')
- pass
- else:
- #password access to ambari-server and mapred
- configure_database_username_password(args)
- dbname = args.database_name
- file = args.upgrade_stack_script_file
- stack_name, stack_version = stack_id.split(STACK_NAME_VER_SEP)
- command = UPGRADE_STACK_CMD[:]
- command[-1] = command[-1].format(file, stack_name, stack_version, dbname)
- retcode, outdata, errdata = run_os_command(command)
- if not retcode == 0:
- raise FatalException(retcode, errdata)
- if errdata:
- print_warning_msg(errdata)
- return retcode
+ return retcode
def load_stack_values(version, filename):
import xml.etree.ElementTree as ET
@@ -2719,108 +2514,6 @@ def load_stack_values(version, filename):
return values
-def upgrade_local_repo_remote_db(args, sqlfile, dbkey, dbvalue):
- tool = get_db_cli_tool(args)
- if not tool:
- # args.warnings.append('{0} not found. Please, run DDL script manually'.format(DATABASE_CLI_TOOLS[DATABASE_INDEX]))
- if VERBOSE:
- print_warning_msg('{0} not found'.format(DATABASE_CLI_TOOLS[DATABASE_INDEX]))
- return -1, "Client wasn't found", "Client wasn't found"
-
- #TODO add support of other databases with scripts
- if args.database == "oracle":
- sid_or_sname = "sid"
- if (hasattr(args, 'sid_or_sname') and args.sid_or_sname == "sname") or \
- (hasattr(args, 'jdbc_url') and args.jdbc_url and re.match(ORACLE_SNAME_PATTERN, args.jdbc_url)):
- print_info_msg("using SERVICE_NAME instead of SID for Oracle")
- sid_or_sname = "service_name"
-
- retcode, out, err = run_in_shell('{0} {1}'.format(tool, ORACLE_UPGRADE_STACK_ARGS.format(
- args.database_username,
- args.database_password,
- args.database_host,
- args.database_port,
- args.database_name,
- sqlfile,
- sid_or_sname,
- dbkey,
- dbvalue
- )))
-
- retcode, out, err = run_in_shell('{0} {1}'.format(tool, ORACLE_UPGRADE_STACK_ARGS.format(
- args.database_username,
- args.database_password,
- args.database_host,
- args.database_port,
- args.database_name,
- DATABASE_FIX_LOCAL_REPO_SCRIPTS[DATABASE_INDEX],
- sid_or_sname,
- '',
- ''
- )))
- return retcode, out, err
-
- return -2, "Wrong database", "Wrong database"
- pass
-
-def upgrade_local_repo_db(args, dbkey, dbvalue):
- if not is_root():
- err = 'Ambari-server upgrade_local_repo_db should be run with ' \
- 'root-level privileges'
- raise FatalException(4, err)
- check_database_name_property()
-
- parse_properties_file(args)
- if args.persistence_type == "remote":
- client_desc = DATABASE_NAMES[DATABASE_INDEX] + ' ' + DATABASE_CLI_TOOLS_DESC[DATABASE_INDEX]
- client_usage_cmd = DATABASE_CLI_TOOLS_USAGE[DATABASE_INDEX].format(DATABASE_INSERT_METAINFO_SCRIPTS[DATABASE_INDEX], args.database_username,
- BLIND_PASSWORD, args.database_name)
- #TODO temporary code
- if not args.database == "oracle":
- raise FatalException(-20, "Upgrade for remote database only supports Oracle.")
-
- if get_db_cli_tool(args):
- retcode, out, err = upgrade_local_repo_remote_db(args, DATABASE_INSERT_METAINFO_SCRIPTS[DATABASE_INDEX],
- dbkey, dbvalue)
- if not retcode == 0:
- raise NonFatalException(err)
-
- else:
- commands = prepare_local_repo_upgrade_commands(args, dbkey, dbvalue)
- err = 'Cannot find ' + client_desc + ' client in the path to upgrade the local ' + \
- 'repo information. To upgrade local repo information. ' + \
- 'you must run the following commands:'
- for command in commands:
- err = err + os.linesep + command
- pass
- args.warnings.append(err)
-
- pass
- else:
- #password access to ambari-server and mapred
- configure_database_username_password(args)
- dbname = args.database_name
- sqlfile = DATABASE_INSERT_METAINFO_SCRIPTS[0]
- command = INSERT_METAINFO_CMD[:]
- command[-1] = command[-1].format(sqlfile, dbkey, dbvalue, dbname)
- retcode, outdata, errdata = run_os_command(command)
- if not retcode == 0:
- raise FatalException(retcode, errdata)
- if errdata:
- print_warning_msg(errdata)
-
- sqlfile = DATABASE_FIX_LOCAL_REPO_SCRIPTS[0]
- command = FIX_LOCAL_REPO_CMD[:]
- command[-1] = command[-1].format(sqlfile, dbname)
- retcode, outdata, errdata = run_os_command(command)
- if not retcode == 0:
- raise FatalException(retcode, errdata)
- if errdata:
- print_warning_msg(errdata)
-
- return retcode
- pass
-
def get_stack_location(properties):
stack_location = properties[STACK_LOCATION_KEY]
@@ -2855,6 +2548,8 @@ def upgrade_local_repo(args):
print_info_msg("Local repo file: " + repo_file_local)
print_info_msg("Repo file: " + repo_file_local)
+ metainfo_update_items = {}
+
if os.path.exists(repo_file_local) and os.path.exists(repo_file):
local_values = load_stack_values(stack_version_local, repo_file_local)
repo_values = load_stack_values(stack_version_local, repo_file)
@@ -2863,7 +2558,65 @@ def upgrade_local_repo(args):
local_url = local_values[k]
repo_url = repo_values[k]
if repo_url != local_url:
- upgrade_local_repo_db(args, k, local_url)
+ metainfo_update_items[k] = local_url
+
+ run_metainfo_upgrade(metainfo_update_items)
+
+
+def run_schema_upgrade(version):
+ jdk_path = find_jdk()
+ if jdk_path is None:
+ print_error_msg("No JDK found, please run the \"setup\" "
+ "command to install a JDK automatically or install any "
+ "JDK manually to " + JDK_INSTALL_DIR)
+ return 1
+ command = SCHEMA_UPGRADE_HELPER_CMD.format(jdk_path, get_ambari_classpath(),
+ get_conf_dir(), version)
+ (retcode, stdout, stderr) = run_os_command(command)
+ print_info_msg("Return code from schema upgrade command, retcode = " + str(retcode))
+ if retcode > 0:
+ print_error_msg("Error executing schema upgrade, please check the server logs.")
+ return retcode
+
+def run_stack_upgrade(stackName, stackVersion):
+ jdk_path = find_jdk()
+ if jdk_path is None:
+ print_error_msg("No JDK found, please run the \"setup\" "
+ "command to install a JDK automatically or install any "
+ "JDK manually to " + JDK_INSTALL_DIR)
+ return 1
+ stackId = {}
+ stackId[stackName] = stackVersion
+
+ command = STACK_UPGRADE_HELPER_CMD.format(jdk_path, get_ambari_classpath(),
+ get_conf_dir(), "updateStackId",
+ json.dumps(stackId))
+ (retcode, stdout, stderr) = run_os_command(command)
+ print_info_msg("Return code from stack upgrade command, retcode = " + str(retcode))
+ if retcode > 0:
+ print_error_msg("Error executing stack upgrade, please check the server logs.")
+ return retcode
+
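
A usage sketch for run_stack_upgrade (the stack name and version below are illustrative): the function serializes a one-entry dict to JSON and passes it to the Java helper together with the "updateStackId" action.

    # Illustrative values only.
    retcode = run_stack_upgrade("HDP", "1.3.0")
    # The helper command is formatted with: updateStackId {"HDP": "1.3.0"}
    if retcode != 0:
        raise FatalException(retcode, "Stack upgrade failed.")
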
+def run_metainfo_upgrade(keyValueMap = None):
+ jdk_path = find_jdk()
+ if jdk_path is None:
+ print_error_msg("No JDK found, please run the \"setup\" "
+ "command to install a JDK automatically or install any "
+ "JDK manually to " + JDK_INSTALL_DIR)
+
+ retcode = 1
+ if keyValueMap:
+ command = STACK_UPGRADE_HELPER_CMD.format(jdk_path, get_ambari_classpath(),
+ get_conf_dir(), 'updateMetaInfo',
+ json.dumps(keyValueMap))
+ (retcode, stdout, stderr) = run_os_command(command)
+ print_info_msg("Return code from stack upgrade command, retcode = " + str(retcode))
+ if retcode > 0:
+ print_error_msg("Error executing metainfo upgrade, please check the "
+ "server logs.")
+
+ return retcode
+
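
Likewise, a sketch of the call upgrade_local_repo makes once it has collected changed repo URLs (the key and URL below are hypothetical examples of entries in that map):

    # Hypothetical entry; upgrade_local_repo adds one item per repo URL that
    # differs between the bundled repo file and the local copy.
    metainfo_update_items = {"repo:/HDP/1.3.0/centos6/baseurl":
                             "http://localhost/hdp/HDP-1.3.0"}
    retcode = run_metainfo_upgrade(metainfo_update_items)  # runs the 'updateMetaInfo' action
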
#
# Upgrades the Ambari Server.
@@ -2898,58 +2651,18 @@ def upgrade(args):
return -1
parse_properties_file(args)
- if args.persistence_type == "remote":
- client_desc = DATABASE_NAMES[DATABASE_INDEX] + ' ' + DATABASE_CLI_TOOLS_DESC[DATABASE_INDEX]
- client_usage_cmd = DATABASE_CLI_TOOLS_USAGE[DATABASE_INDEX].format(DATABASE_UPGRADE_SCRIPTS[DATABASE_INDEX], args.database_username,
- BLIND_PASSWORD, args.database_name)
+ server_version = None
+ if args.server_version_file_path:
+ with open(args.server_version_file_path, 'r') as f:
+ server_version = f.read()
- #TODO temporarty code
- if not args.database in ["oracle", "mysql"]:
- raise FatalException(-20, "Upgrade for remote database only supports Oracle.")
+ if not server_version:
+ raise FatalException(-1, 'Cannot determine server version from version file '
+ '%s' % args.server_version_file_path)
- if get_db_cli_tool(args):
- retcode, out, err = execute_remote_script(args, DATABASE_UPGRADE_SCRIPTS[DATABASE_INDEX])
- if not retcode == 0:
- raise NonFatalException(err)
-
- else:
- command = prepare_schema_upgrade_command(args)
- err = 'Cannot find ' + client_desc + ' client in the path to upgrade the Ambari ' + \
- 'Server schema. To upgrade Ambari Server schema ' + \
- 'you must run the following command:' + \
- os.linesep + command
- args.warnings.append(err)
-
- pass
- else:
- print 'Checking PostgreSQL...'
- retcode = check_postgre_up()
- if not retcode == 0:
- err = 'PostgreSQL server not running. Exiting'
- raise FatalException(retcode, err)
-
- file = args.upgrade_script_file
- print 'Upgrading database...'
- retcode = execute_db_script(args, file)
- if not retcode == 0:
- err = 'Database upgrade script has failed. Exiting.'
- raise FatalException(retcode, err)
-
-
- print 'Checking database integrity...'
- check_file = file[:-3] + "Check" + file[-4:]
- retcode = check_db_consistency(args, check_file)
-
- if not retcode == 0:
- print 'Found inconsistency. Trying to fix...'
- fix_file = file[:-3] + "Fix" + file[-4:]
- retcode = execute_db_script(args, fix_file)
-
- if not retcode == 0:
- err = 'Database cannot be fixed. Exiting.'
- raise FatalException(retcode, err)
- else:
- print 'Database is consistent.'
+ retcode = run_schema_upgrade(server_version.strip())
+ if not retcode == 0:
+ raise FatalException(retcode, 'Schema upgrade failed.')
user = read_ambari_user()
if user is None:
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql
deleted file mode 100644
index eb4f2a3..0000000
--- a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql
+++ /dev/null
@@ -1,95 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements. See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership. The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License. You may obtain a copy of the License at
---
--- http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-
-
--- DDL
-
---Upgrade version to current
-UPDATE metainfo SET "metainfo_value" = '${ambariVersion}' WHERE metainfo_key = 'version';
-
-ALTER TABLE hostcomponentdesiredstate ADD admin_state VARCHAR(32);
-
---1.5.0
-CREATE TABLE request (request_id BIGINT NOT NULL, cluster_id BIGINT, request_schedule_id BIGINT, command_name VARCHAR(255), create_time BIGINT NOT NULL, end_time BIGINT NOT NULL, inputs LONGTEXT, request_context VARCHAR(255), request_type VARCHAR(255), start_time BIGINT NOT NULL, status VARCHAR(255), target_component VARCHAR(255), target_hosts LONGTEXT, target_service VARCHAR(255), PRIMARY KEY (request_id));
-CREATE TABLE requestschedule (schedule_id bigint, cluster_id BIGINT NOT NULL, description varchar(255), status varchar(255), batch_separation_seconds smallint, batch_toleration_limit smallint, create_user varchar(255), create_timestamp bigint, update_user varchar(255), update_timestamp bigint, minutes varchar(10), hours varchar(10), days_of_month varchar(10), month varchar(10), day_of_week varchar(10), yearToSchedule varchar(10), startTime varchar(50), endTime varchar(50), last_execution_status varchar(255), PRIMARY KEY(schedule_id));
-CREATE TABLE requestschedulebatchrequest (schedule_id bigint, batch_id bigint, request_id bigint, request_type varchar(255), request_uri varchar(1024), request_body LONGBLOB, request_status varchar(255), return_code smallint, return_message varchar(2000), PRIMARY KEY(schedule_id, batch_id));
-
-insert into request(request_id, cluster_id, request_context, start_time, end_time, create_time)
- select distinct s.request_id, s.cluster_id, s.request_context, coalesce (cmd.start_time, -1), coalesce (cmd.end_time, -1), -1
- from
- (select distinct request_id, cluster_id, request_context from stage ) s
- left join
- (select request_id, min(start_time) as start_time, max(end_time) as end_time from host_role_command group by request_id) cmd
- on s.request_id=cmd.request_id;
-
-ALTER TABLE stage ADD CONSTRAINT FK_stage_request_id FOREIGN KEY (request_id) REFERENCES request (request_id);
-ALTER TABLE request ADD CONSTRAINT FK_request_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
-ALTER TABLE request ADD CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id);
-ALTER TABLE requestschedulebatchrequest ADD CONSTRAINT FK_requestschedulebatchrequest_schedule_id FOREIGN KEY (schedule_id) REFERENCES ambari.requestschedule (schedule_id);
-
-
-
---quartz tables
-CREATE TABLE QRTZ_JOB_DETAILS ( SCHED_NAME VARCHAR(120) NOT NULL, JOB_NAME VARCHAR(200) NOT NULL, JOB_GROUP VARCHAR(200) NOT NULL, DESCRIPTION VARCHAR(250) NULL, JOB_CLASS_NAME VARCHAR(250) NOT NULL, IS_DURABLE VARCHAR(1) NOT NULL, IS_NONCONCURRENT VARCHAR(1) NOT NULL, IS_UPDATE_DATA VARCHAR(1) NOT NULL, REQUESTS_RECOVERY VARCHAR(1) NOT NULL, JOB_DATA BLOB NULL, PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) );
-CREATE TABLE QRTZ_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, JOB_NAME VARCHAR(200) NOT NULL, JOB_GROUP VARCHAR(200) NOT NULL, DESCRIPTION VARCHAR(250) NULL, NEXT_FIRE_TIME BIGINT(13) NULL, PREV_FIRE_TIME BIGINT(13) NULL, PRIORITY INTEGER NULL, TRIGGER_STATE VARCHAR(16) NOT NULL, TRIGGER_TYPE VARCHAR(8) NOT NULL, START_TIME BIGINT(13) NOT NULL, END_TIME BIGINT(13) NULL, CALENDAR_NAME VARCHAR(200) NULL, MISFIRE_INSTR SMALLINT(2) NULL, JOB_DATA BLOB NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP) );
-CREATE TABLE QRTZ_SIMPLE_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, REPEAT_COUNT BIGINT(7) NOT NULL, REPEAT_INTERVAL BIGINT(12) NOT NULL, TIMES_TRIGGERED BIGINT(10) NOT NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-CREATE TABLE QRTZ_CRON_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, CRON_EXPRESSION VARCHAR(200) NOT NULL, TIME_ZONE_ID VARCHAR(80), PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-CREATE TABLE QRTZ_SIMPROP_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, STR_PROP_1 VARCHAR(512) NULL, STR_PROP_2 VARCHAR(512) NULL, STR_PROP_3 VARCHAR(512) NULL, INT_PROP_1 INT NULL, INT_PROP_2 INT NULL, LONG_PROP_1 BIGINT NULL, LONG_PROP_2 BIGINT NULL, DEC_PROP_1 NUMERIC(13,4) NULL, DEC_PROP_2 NUMERIC(13,4) NULL, BOOL_PROP_1 VARCHAR(1) NULL, BOOL_PROP_2 VARCHAR(1) NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-CREATE TABLE QRTZ_BLOB_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, BLOB_DATA BLOB NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-CREATE TABLE QRTZ_CALENDARS ( SCHED_NAME VARCHAR(120) NOT NULL, CALENDAR_NAME VARCHAR(200) NOT NULL, CALENDAR BLOB NOT NULL, PRIMARY KEY (SCHED_NAME,CALENDAR_NAME) );
-CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP) );
-CREATE TABLE QRTZ_FIRED_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, ENTRY_ID VARCHAR(95) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, INSTANCE_NAME VARCHAR(200) NOT NULL, FIRED_TIME BIGINT(13) NOT NULL, SCHED_TIME BIGINT(13) NOT NULL, PRIORITY INTEGER NOT NULL, STATE VARCHAR(16) NOT NULL, JOB_NAME VARCHAR(200) NULL, JOB_GROUP VARCHAR(200) NULL, IS_NONCONCURRENT VARCHAR(1) NULL, REQUESTS_RECOVERY VARCHAR(1) NULL, PRIMARY KEY (SCHED_NAME,ENTRY_ID) );
-CREATE TABLE QRTZ_SCHEDULER_STATE ( SCHED_NAME VARCHAR(120) NOT NULL, INSTANCE_NAME VARCHAR(200) NOT NULL, LAST_CHECKIN_TIME BIGINT(13) NOT NULL, CHECKIN_INTERVAL BIGINT(13) NOT NULL, PRIMARY KEY (SCHED_NAME,INSTANCE_NAME) );
-CREATE TABLE QRTZ_LOCKS ( SCHED_NAME VARCHAR(120) NOT NULL, LOCK_NAME VARCHAR(40) NOT NULL, PRIMARY KEY (SCHED_NAME,LOCK_NAME) );
-
-create index idx_qrtz_j_req_recovery on QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
-create index idx_qrtz_j_grp on QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
-
-create index idx_qrtz_t_j on QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
-create index idx_qrtz_t_jg on QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
-create index idx_qrtz_t_c on QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
-create index idx_qrtz_t_g on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
-create index idx_qrtz_t_state on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
-create index idx_qrtz_t_n_state on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
-create index idx_qrtz_t_n_g_state on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
-create index idx_qrtz_t_next_fire_time on QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
-create index idx_qrtz_t_nft_st on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
-create index idx_qrtz_t_nft_misfire on QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
-create index idx_qrtz_t_nft_st_misfire on QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
-create index idx_qrtz_t_nft_st_misfire_grp on QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
-
-create index idx_qrtz_ft_trig_inst_name on QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
-create index idx_qrtz_ft_inst_job_req_rcvry on QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
-create index idx_qrtz_ft_j_g on QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
-create index idx_qrtz_ft_jg on QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
-create index idx_qrtz_ft_t_g on QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
-create index idx_qrtz_ft_tg on QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
-
-ALTER TABLE hostcomponentdesiredstate ADD passive_state VARCHAR(32) NOT NULL DEFAULT 'ACTIVE';
-ALTER TABLE servicedesiredstate ADD passive_state VARCHAR(32) NOT NULL DEFAULT 'ACTIVE';
-ALTER TABLE hoststate ADD passive_state VARCHAR(512);
-ALTER TABLE host_role_command ADD command_detail VARCHAR(255);
-ALTER TABLE host_role_command ADD custom_command_name VARCHAR(255);
-
--- blueprint related tables
-CREATE TABLE blueprint (blueprint_name VARCHAR(255) NOT NULL, stack_name VARCHAR(255) NOT NULL, stack_version VARCHAR(255) NOT NULL, PRIMARY KEY(blueprint_name));
-CREATE TABLE hostgroup (blueprint_name VARCHAR(255) NOT NULL, name VARCHAR(255) NOT NULL, cardinality VARCHAR(255) NOT NULL, PRIMARY KEY(blueprint_name, name));
-CREATE TABLE hostgroup_component (blueprint_name VARCHAR(255) NOT NULL, hostgroup_name VARCHAR(255) NOT NULL, name VARCHAR(255) NOT NULL, PRIMARY KEY(blueprint_name, hostgroup_name, name));
-
-ALTER TABLE hostgroup ADD FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name);
-ALTER TABLE hostgroup_component ADD FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup(blueprint_name, name);