Posted to commits@ambari.apache.org by mp...@apache.org on 2014/02/21 20:03:57 UTC

[1/4] AMBARI-4716. Run Ambari Server Upgrade via code rather than DDL/DML. (mpapirkovskyy)

Repository: ambari
Updated Branches:
  refs/heads/trunk c02c7bf50 -> fea7b6222


http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/test/python/ambari.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/ambari.properties b/ambari-server/src/test/python/ambari.properties
new file mode 100644
index 0000000..f59d320
--- /dev/null
+++ b/ambari-server/src/test/python/ambari.properties
@@ -0,0 +1 @@
+server.jdbc.database=oldDBName
\ No newline at end of file


[3/4] AMBARI-4716. Run Ambari Server Upgrade via code rather than DDL/DML. (mpapirkovskyy)

Posted by mp...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeHelper.java
new file mode 100644
index 0000000..b9fec20
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeHelper.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.upgrade;
+
+import com.google.gson.Gson;
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
+import com.google.inject.persist.Transactional;
+import org.apache.ambari.server.controller.ControllerModule;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.MetainfoDAO;
+import org.apache.ambari.server.orm.entities.MetainfoEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.InputMismatchException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+public class StackUpgradeHelper {
+  private static final Logger LOG = LoggerFactory.getLogger
+    (StackUpgradeHelper.class);
+
+  private static final String STACK_ID_UPDATE_ACTION = "updateStackId";
+  private static final String METAINFO_UPDATE_ACTION = "updateMetaInfo";
+  private static final String STACK_ID_STACK_NAME_KEY = "stackName";
+
+  @Inject
+  private DBAccessor dbAccessor;
+  @Inject
+  private PersistService persistService;
+  @Inject
+  private MetainfoDAO metainfoDAO;
+  @Inject
+  private StackUpgradeUtil stackUpgradeUtil;
+
+  private void startPersistenceService() {
+    persistService.start();
+  }
+
+  private void stopPersistenceService() {
+    persistService.stop();
+  }
+
+  /**
+   * Add key-value pairs to the metainfo table, updating entries that already exist.
+   * @param data map of metainfo keys to values
+   * @throws SQLException if the database update fails
+   */
+  @Transactional
+  private void updateMetaInfo(Map<String, String> data) throws SQLException {
+    if (data != null && !data.isEmpty()) {
+      for (Map.Entry<String, String> entry : data.entrySet()) {
+        MetainfoEntity metainfoEntity = metainfoDAO.findByKey(entry.getKey());
+        if (metainfoEntity != null) {
+          metainfoEntity.setMetainfoName(entry.getKey());
+          metainfoEntity.setMetainfoValue(entry.getValue());
+          metainfoDAO.merge(metainfoEntity);
+        } else {
+          metainfoEntity = new MetainfoEntity();
+          metainfoEntity.setMetainfoName(entry.getKey());
+          metainfoEntity.setMetainfoValue(entry.getValue());
+          metainfoDAO.create(metainfoEntity);
+        }
+      }
+    }
+  }
+
+  /**
+   * Change the stack id in the Ambari DB.
+   * @param stackId single-entry map of stack name to stack version
+   * @throws SQLException if the database update fails
+   */
+  private void updateStackVersion(Map<String, String> stackId) throws SQLException {
+    if (stackId == null || stackId.isEmpty()) {
+      throw new IllegalArgumentException("Empty stack id. " + stackId);
+    }
+    Iterator<Map.Entry<String, String>> stackIdEntry = stackId.entrySet().iterator();
+    Map.Entry<String, String> stackEntry = stackIdEntry.next();
+
+    String stackName = stackEntry.getKey();
+    String stackVersion = stackEntry.getValue();
+
+    LOG.info("Updating stack id, stackName = " + stackName + ", " +
+      "stackVersion = "+ stackVersion);
+
+    stackUpgradeUtil.updateStackDetails(stackName, stackVersion);
+
+    dbAccessor.updateTable("hostcomponentstate", "current_state", "INSTALLED", "where current_state = 'UPGRADING'");
+  }
+
+  private List<String> getValidActions() {
+    return new ArrayList<String>() {{
+      add(STACK_ID_UPDATE_ACTION);
+      add(METAINFO_UPDATE_ACTION);
+    }};
+  }
+
+  /**
+   * Apply the database changes needed to support a stack upgrade.
+   * @param args the action to perform, followed by a simple key-value JSON map
+   */
+  public static void main(String[] args) {
+    try {
+      if (args.length < 2) {
+        throw new InputMismatchException("Need to provide an action and " +
+          "a key-value JSON map.");
+      }
+
+      String action = args[0];
+      String valueMap = args[1];
+
+      Injector injector = Guice.createInjector(new ControllerModule());
+      StackUpgradeHelper stackUpgradeHelper = injector.getInstance(StackUpgradeHelper.class);
+      Gson gson = injector.getInstance(Gson.class);
+
+      if (!stackUpgradeHelper.getValidActions().contains(action)) {
+        throw new IllegalArgumentException("Unsupported action. Allowed " +
+          "actions: " + stackUpgradeHelper.getValidActions());
+      }
+
+      stackUpgradeHelper.startPersistenceService();
+      Map values = gson.fromJson(valueMap, Map.class);
+
+      if (action.equals(STACK_ID_UPDATE_ACTION)) {
+        stackUpgradeHelper.updateStackVersion(values);
+      } else if (action.equals(METAINFO_UPDATE_ACTION)) {
+        stackUpgradeHelper.updateMetaInfo(values);
+      }
+
+      stackUpgradeHelper.stopPersistenceService();
+
+    } catch (Throwable t) {
+      LOG.error("Caught exception on upgrade. Exiting...", t);
+      System.exit(1);
+    }
+  }
+}

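A usage sketch, for illustration only (not part of the patch): the helper takes an action name plus a simple key-value JSON map, matching the STACK_UPGRADE_HELPER_CMD template added to ambari-server.py further below. The stack name and version here are example values.

    import org.apache.ambari.server.upgrade.StackUpgradeHelper;

    public class StackUpgradeHelperDemo {
      public static void main(String[] args) {
        // "updateStackId" expects a map of stack name to stack version;
        // "HDP" and "2.0.6" are example values, not taken from the patch.
        StackUpgradeHelper.main(new String[] {
            "updateStackId", "{\"HDP\":\"2.0.6\"}"
        });
      }
    }
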
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java
new file mode 100644
index 0000000..75189cc
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.upgrade;
+
+import com.google.gson.Gson;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.persist.Transactional;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.ClusterStateDAO;
+import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
+import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.ServiceDesiredStateDAO;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterStateEntity;
+import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
+import org.apache.ambari.server.state.StackId;
+import java.util.ArrayList;
+import java.util.List;
+
+public class StackUpgradeUtil {
+  @Inject
+  private Gson gson;
+  @Inject
+  private Injector injector;
+
+  private String getStackIdString(String originalStackId, String stackName,
+                                  String stackVersion) {
+    if (stackVersion == null) {
+      stackVersion = gson.fromJson(originalStackId, StackId.class).getStackVersion();
+    }
+
+    return String.format(
+      "{\"stackName\":\"%s\",\"stackVersion\":\"%s\"}",
+      stackName,
+      stackVersion
+    );
+  }
+
+  @Transactional
+  public void updateStackDetails(String stackName, String stackVersion) {
+    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
+    List<Long> clusterIds = new ArrayList<Long>();
+
+    List<ClusterEntity> clusterEntities = clusterDAO.findAll();
+    if (clusterEntities != null && !clusterEntities.isEmpty()) {
+      for (ClusterEntity entity : clusterEntities) {
+        clusterIds.add(entity.getClusterId());
+        String stackIdString = entity.getDesiredStackVersion();
+        entity.setDesiredStackVersion(getStackIdString(stackIdString,
+          stackName, stackVersion));
+        clusterDAO.merge(entity);
+      }
+    }
+
+    ClusterStateDAO clusterStateDAO = injector.getInstance(ClusterStateDAO.class);
+
+    for (Long clusterId : clusterIds) {
+      ClusterStateEntity clusterStateEntity = clusterStateDAO.findByPK(clusterId);
+      String currentStackVersion = clusterStateEntity.getCurrentStackVersion();
+      clusterStateEntity.setCurrentStackVersion(getStackIdString
+        (currentStackVersion, stackName, stackVersion));
+      clusterStateDAO.merge(clusterStateEntity);
+    }
+
+    HostComponentStateDAO hostComponentStateDAO = injector.getInstance
+      (HostComponentStateDAO.class);
+    List<HostComponentStateEntity> hcEntities = hostComponentStateDAO.findAll();
+
+    if (hcEntities != null) {
+      for (HostComponentStateEntity hc : hcEntities) {
+        String currentStackVersion = hc.getCurrentStackVersion();
+        hc.setCurrentStackVersion(getStackIdString(currentStackVersion,
+          stackName, stackVersion));
+        hostComponentStateDAO.merge(hc);
+      }
+    }
+
+    HostComponentDesiredStateDAO hostComponentDesiredStateDAO =
+      injector.getInstance(HostComponentDesiredStateDAO.class);
+
+    List<HostComponentDesiredStateEntity> hcdEntities = hostComponentDesiredStateDAO.findAll();
+
+    if (hcdEntities != null) {
+      for (HostComponentDesiredStateEntity hcd : hcdEntities) {
+        String desiredStackVersion = hcd.getDesiredStackVersion();
+        hcd.setDesiredStackVersion(getStackIdString(desiredStackVersion,
+          stackName, stackVersion));
+        hostComponentDesiredStateDAO.merge(hcd);
+      }
+    }
+
+    ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO =
+      injector.getInstance(ServiceComponentDesiredStateDAO.class);
+
+    List<ServiceComponentDesiredStateEntity> scdEntities =
+      serviceComponentDesiredStateDAO.findAll();
+
+    if (scdEntities != null) {
+      for (ServiceComponentDesiredStateEntity scd : scdEntities) {
+        String desiredStackVersion = scd.getDesiredStackVersion();
+        scd.setDesiredStackVersion(getStackIdString(desiredStackVersion,
+          stackName, stackVersion));
+        serviceComponentDesiredStateDAO.merge(scd);
+      }
+    }
+
+    ServiceDesiredStateDAO serviceDesiredStateDAO = injector.getInstance(ServiceDesiredStateDAO.class);
+
+    List<ServiceDesiredStateEntity> sdEntities = serviceDesiredStateDAO.findAll();
+
+    if (sdEntities != null) {
+      for (ServiceDesiredStateEntity sd : sdEntities) {
+        String desiredStackVersion = sd.getDesiredStackVersion();
+        sd.setDesiredStackVersion(getStackIdString(desiredStackVersion,
+          stackName, stackVersion));
+        serviceDesiredStateDAO.merge(sd);
+      }
+    }
+  }
+}
\ No newline at end of file

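A short sketch of the stored stack id format (illustrative; it assumes, as getStackIdString() above implies, that Gson can map this JSON onto StackId):

    import com.google.gson.Gson;
    import org.apache.ambari.server.state.StackId;

    public class StackIdJsonDemo {
      public static void main(String[] args) {
        // The desired/current stack version columns hold JSON of this shape;
        // "HDP" and "1.3.0" are example values, not taken from the patch.
        String json = "{\"stackName\":\"HDP\",\"stackVersion\":\"1.3.0\"}";
        StackId stackId = new Gson().fromJson(json, StackId.class);
        System.out.println(stackId.getStackVersion()); // expected: 1.3.0
      }
    }
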
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog.java
new file mode 100644
index 0000000..07384cc
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.upgrade;
+
+import org.apache.ambari.server.AmbariException;
+
+import java.sql.SQLException;
+
+/**
+ * Interface for upgrading Ambari DB
+ */
+public interface UpgradeCatalog {
+  /**
+   * Run the upgrade scripts that take the Ambari Server schema from the
+   * current version to the new version.
+   * @throws AmbariException if the upgrade fails
+   * @throws SQLException if a database error occurs
+   */
+  public void upgradeSchema() throws AmbariException, SQLException;
+
+  /**
+   * Start the persistence service and perform data (DML) updates as necessary.
+   * @throws AmbariException if the update fails
+   * @throws SQLException if a database error occurs
+   */
+  public void executeDMLUpdates() throws AmbariException, SQLException;
+
+  /**
+   * Return the version that will be upgraded to.
+   * @return the target version
+   */
+  public String getTargetVersion();
+
+  /**
+   * Return the latest source version that can be upgraded from.
+   *
+   * @return the source version, or null if no catalog exists before this one
+   */
+  public String getSourceVersion();
+}

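A minimal implementing catalog, sketched for illustration (class name and version strings are assumptions, not part of the patch; it assumes the class lives in the same org.apache.ambari.server.upgrade package):

    import java.sql.SQLException;
    import org.apache.ambari.server.AmbariException;

    public class ExampleUpgradeCatalog implements UpgradeCatalog {
      @Override
      public void upgradeSchema() throws AmbariException, SQLException {
        // DDL changes (create tables, add columns and constraints) go here.
      }

      @Override
      public void executeDMLUpdates() throws AmbariException, SQLException {
        // Data fixes that run after the schema upgrade go here.
      }

      @Override
      public String getTargetVersion() {
        return "1.5.0"; // the version this catalog upgrades to
      }

      @Override
      public String getSourceVersion() {
        return null; // null: no catalog exists before this one
      }
    }
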
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
new file mode 100644
index 0000000..86bbef3
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
@@ -0,0 +1,487 @@
+package org.apache.ambari.server.upgrade;
+
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.ClusterStateDAO;
+import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
+import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
+import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.ServiceDesiredStateDAO;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterStateEntity;
+import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
+import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
+import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
+import org.apache.ambari.server.state.State;
+import org.eclipse.persistence.jpa.JpaEntityManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
+import javax.persistence.criteria.CriteriaBuilder;
+import javax.persistence.criteria.CriteriaQuery;
+import javax.persistence.criteria.Expression;
+import javax.persistence.criteria.Predicate;
+import javax.persistence.criteria.Root;
+import java.io.File;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class UpgradeCatalog150 extends AbstractUpgradeCatalog {
+  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog150.class);
+  private static final String quartzScriptFilePattern = "quartz.%s.sql";
+  private Injector injector;
+
+  @Inject
+  public UpgradeCatalog150(Injector injector) {
+    super(injector);
+    this.injector = injector;
+  }
+
+  @Override
+  public void executeDDLUpdates() throws AmbariException, SQLException {
+    LOG.debug("Upgrading schema...");
+
+    List<DBColumnInfo> columns = new ArrayList<DBColumnInfo>();
+
+    // ========================================================================
+    // Create tables
+
+    // ClusterConfigMapping
+    columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("type_name", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("version_tag", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("create_timestamp", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("selected", Integer.class, 0, null, false));
+    columns.add(new DBColumnInfo("user_name", String.class, 255, "_db", false));
+
+    dbAccessor.createTable("clusterconfigmapping", columns, "cluster_id", "type_name", "create_timestamp");
+
+    // Request
+    columns.clear();
+    columns.add(new DBColumnInfo("request_id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, true));
+    columns.add(new DBColumnInfo("request_schedule_id", Long.class, null, null, true));
+    columns.add(new DBColumnInfo("command_name", String.class, 255, null, true));
+    columns.add(new DBColumnInfo("create_time", Long.class, null, null, true));
+    columns.add(new DBColumnInfo("end_time", Long.class, null, null, true));
+    columns.add(new DBColumnInfo("inputs", byte[].class, null, null, true));
+    columns.add(new DBColumnInfo("request_context", String.class, 255, null, true));
+    columns.add(new DBColumnInfo("request_type", String.class, 255, null, true));
+    columns.add(new DBColumnInfo("start_time", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("target_component", String.class, 255, null, true));
+    columns.add(new DBColumnInfo("target_hosts", String.class, null, null, false));
+    columns.add(new DBColumnInfo("target_service", String .class, 255, null, true));
+
+    dbAccessor.createTable("request", columns, "request_id");
+
+    // RequestSchedule
+    columns.clear();
+    columns.add(new DBColumnInfo("schedule_id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("description", String.class, 255, null, true));
+    columns.add(new DBColumnInfo("status", String.class, 255, null, true));
+    columns.add(new DBColumnInfo("batch_separation_seconds", Integer.class, null, null, true));
+    columns.add(new DBColumnInfo("batch_toleration_limit", Integer.class, null, null, true));
+    columns.add(new DBColumnInfo("create_user", String.class, 255, null, true));
+    columns.add(new DBColumnInfo("create_timestamp", Long.class, null, null, true));
+    columns.add(new DBColumnInfo("update_user", String.class, 255, null, true));
+    columns.add(new DBColumnInfo("update_timestamp", Long.class, null, null, true));
+    columns.add(new DBColumnInfo("minutes", String.class, 10, null, true));
+    columns.add(new DBColumnInfo("hours", String.class, 10, null, true));
+    columns.add(new DBColumnInfo("days_of_month", String.class, 10, null, true));
+    columns.add(new DBColumnInfo("month", String.class, 10, null, true));
+    columns.add(new DBColumnInfo("day_of_week", String.class, 10, null, true));
+    columns.add(new DBColumnInfo("yearToSchedule", String.class, 10, null, true));
+    columns.add(new DBColumnInfo("startTime", String.class, 50, null, true));
+    columns.add(new DBColumnInfo("endTime", String.class, 50, null, true));
+    columns.add(new DBColumnInfo("last_execution_status", String.class, 255, null, true));
+
+    dbAccessor.createTable("requestschedule", columns, "schedule_id");
+
+    // RequestScheduleBatchRequest
+    columns.clear();
+    columns.add(new DBColumnInfo("schedule_id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("batch_id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("request_id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("request_type", String.class, 255, null, true));
+    columns.add(new DBColumnInfo("request_uri", String.class, 1024, null, true));
+    columns.add(new DBColumnInfo("request_body", byte[].class, null, null, true));
+    columns.add(new DBColumnInfo("request_status", String.class, 255, null, true));
+    columns.add(new DBColumnInfo("return_code", Integer.class, null, null, true));
+    columns.add(new DBColumnInfo("return_message", String.class, 2000, null, true));
+
+    dbAccessor.createTable("requestschedulebatchrequest", columns, "schedule_id", "batch_id");
+
+    // HostConfigMapping
+    columns.clear();
+    columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("host_name", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("type_name", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("version_tag", String.class, 255, null, true));
+    columns.add(new DBColumnInfo("service_name", String.class, 255, null, true));
+    columns.add(new DBColumnInfo("create_timestamp", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("selected", Integer.class, 0, null, false));
+
+    dbAccessor.createTable("hostconfigmapping", columns, "cluster_id", "host_name", "type_name", "create_timestamp");
+
+    // Sequences
+    columns.clear();
+    columns.add(new DBColumnInfo("sequence_name", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("value", Long.class, null, null, false));
+
+    dbAccessor.createTable("ambari_sequences", columns, "sequence_name");
+
+    // Metainfo
+
+    columns.clear();
+    columns.add(new DBColumnInfo("metainfo_key", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("metainfo_value", String.class, 255, null, false));
+
+    dbAccessor.createTable("metainfo", columns, "metainfo_key");
+
+    // ConfigGroup
+    columns.clear();
+    columns.add(new DBColumnInfo("group_id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("group_name", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("tag", String.class, 1024, null, false));
+    columns.add(new DBColumnInfo("description", String.class, 1024, null, true));
+    columns.add(new DBColumnInfo("create_timestamp", Long.class, null, null, false));
+
+    dbAccessor.createTable("configgroup", columns, "group_id");
+
+    // ConfigGroupClusterConfigMapping
+    columns.clear();
+    columns.add(new DBColumnInfo("config_group_id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("config_type", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("version_tag", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("user_name", String.class, 255, "_db", true));
+    columns.add(new DBColumnInfo("create_timestamp", Long.class, null, null, false));
+
+    dbAccessor.createTable("confgroupclusterconfigmapping", columns, "config_group_id", "cluster_id", "config_type");
+
+    // ConfigGroupHostMapping
+    columns.clear();
+    columns.add(new DBColumnInfo("config_group_id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("host_name", String.class, 255, null, false));
+
+    dbAccessor.createTable("configgrouphostmapping", columns, "config_group_id", "host_name");
+
+    // Blueprint
+    columns.clear();
+    columns.add(new DBColumnInfo("blueprint_name", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("stack_name", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("stack_version", String.class, 255, null, false));
+
+    dbAccessor.createTable("blueprint", columns, "blueprint_name");
+
+    // HostGroup
+    columns.clear();
+    columns.add(new DBColumnInfo("blueprint_name", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("name", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("cardinality", String.class, 255, null, false));
+
+    dbAccessor.createTable("hostgroup", columns, "blueprint_name", "name");
+
+    // HostGroupComponent
+    columns.clear();
+    columns.add(new DBColumnInfo("blueprint_name", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("hostgroup_name", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("name", String.class, 255, null, false));
+
+    dbAccessor.createTable("hostgroup_component", columns, "blueprint_name", "hostgroup_name", "name");
+
+    createQuartzTables();
+
+    // ========================================================================
+    // Add columns
+
+    dbAccessor.addColumn("hostcomponentdesiredstate", new DBColumnInfo("passive_state", String.class, 32, "ACTIVE", false));
+    dbAccessor.addColumn("servicedesiredstate", new DBColumnInfo("passive_state", String.class, 32, "ACTIVE", false));
+    dbAccessor.addColumn("hoststate", new DBColumnInfo("passive_state", String.class, 512, null, true));
+    dbAccessor.addColumn("hostcomponentdesiredstate", new DBColumnInfo("admin_state", String.class, 32, null, true));
+    dbAccessor.addColumn("hosts", new DBColumnInfo("ph_cpu_count", Integer.class, 32, null, true));
+    dbAccessor.addColumn("clusterstate", new DBColumnInfo("current_stack_version", String.class, 255, null, false));
+    dbAccessor.addColumn("hostconfigmapping", new DBColumnInfo("user_name", String.class, 255, "_db", false));
+    dbAccessor.addColumn("stage", new DBColumnInfo("request_context", String.class, 255, null, true));
+    dbAccessor.addColumn("stage", new DBColumnInfo("cluster_host_info", byte[].class, null, null, true));
+    dbAccessor.addColumn("clusterconfigmapping", new DBColumnInfo("user_name", String.class, 255, "_db", false));
+    dbAccessor.addColumn("host_role_command", new DBColumnInfo("end_time", Long.class, null, null, true));
+    dbAccessor.addColumn("host_role_command", new DBColumnInfo("structured_out", byte[].class, null, null, true));
+    dbAccessor.addColumn("host_role_command", new DBColumnInfo("command_detail", String.class, 255, null, true));
+    dbAccessor.addColumn("host_role_command", new DBColumnInfo("custom_command_name", String.class, 255, null, true));
+
+    // Alter columns
+
+    if (getDbType().equals(Configuration.POSTGRES_DB_NAME)) {
+      try {
+        dbAccessor.executeQuery("ALTER TABLE hostcomponentdesiredconfigmapping rename to hcdesiredconfigmapping;");
+        dbAccessor.executeQuery("ALTER TABLE users ALTER column user_id DROP DEFAULT;");
+        dbAccessor.executeQuery("ALTER TABLE users ALTER column ldap_user TYPE INTEGER USING CASE WHEN ldap_user=true THEN 1 ELSE 0 END;");
+        dbAccessor.executeQuery("ALTER TABLE hosts DROP COLUMN disks_info;");
+      } catch (SQLException e) {
+        LOG.warn("Error encountered while altering schema. ", e);
+        // continue updates
+      }
+    }
+
+    // ========================================================================
+    // Add constraints
+
+    dbAccessor.addFKConstraint("stage", "FK_stage_request_id", "request_id", "request", "request_id", true);
+    dbAccessor.addFKConstraint("request", "FK_request_cluster_id", "cluster_id", "clusters", "cluster_id", true);
+    dbAccessor.addFKConstraint("request", "FK_request_schedule_id", "request_schedule_id", "requestschedule", "schedule_id", true);
+    dbAccessor.addFKConstraint("requestschedulebatchrequest", "FK_requestschedulebatchrequest_schedule_id", "schedule_id", "requestschedule", "schedule_id", true);
+    dbAccessor.addFKConstraint("hostconfigmapping", "FK_hostconfigmapping_cluster_id", "cluster_id", "clusters", "cluster_id", true);
+    dbAccessor.addFKConstraint("hostconfigmapping", "FK_hostconfigmapping_host_name", "host_name", "hosts", "host_name", true);
+    dbAccessor.addFKConstraint("configgroup", "FK_configgroup_cluster_id", "cluster_id", "clusters", "cluster_id", true);
+    dbAccessor.addFKConstraint("confgroupclusterconfigmapping", "FK_cg_cluster_cm_config_tag", new String[] {"version_tag", "config_type", "cluster_id"}, "clusterconfig", new String[] {"version_tag", "type_name", "cluster_id"}, true);
+    dbAccessor.addFKConstraint("confgroupclusterconfigmapping", "FK_cg_cluster_cm_group_id", "config_group_id", "configgroup", "group_id", true);
+    dbAccessor.addFKConstraint("confgrouphostmapping", "FK_cghostm_configgroup_id", "config_group_id", "configgroup", "group_id", true);
+    dbAccessor.addFKConstraint("confgrouphostmapping", "FK_cghostm_host_name", "host_name", "hosts", "host_name", true);
+    dbAccessor.addFKConstraint("clusterconfigmapping", "FK_clustercfgmap_cluster_id", "cluster_id", "clusters", "cluster_id", true);
+
+    // ========================================================================
+    // Finally update schema version
+    updateMetaInfoVersion(getTargetVersion());
+  }
+
+  @Override
+  public void executeDMLUpdates() throws AmbariException, SQLException {
+    // Service Config mapping
+    String tableName = "serviceconfigmapping";
+    String dbType = getDbType();
+
+    EntityManager em = getEntityManagerProvider().get();
+
+    // Unable to do this via the DAOs, as they were dropped
+    // TODO: should we move this to DDL and drop the unused tables then?
+    if (dbAccessor.tableExists(tableName)
+      && dbAccessor.tableHasData(tableName)
+      && dbAccessor.tableExists("clusterconfigmapping")) {
+
+      if (dbType.equals(Configuration.POSTGRES_DB_NAME)) {
+        // The service config mapping entity will be deleted, so this has to
+        // be executed as a raw query rather than through a DAO
+
+        dbAccessor.executeQuery(getPostgresServiceConfigMappingQuery());
+
+        dbAccessor.truncateTable(tableName);
+
+      } else {
+        LOG.warn("Unsupported database for service config mapping query. " +
+          "database = " + dbType);
+      }
+    }
+
+
+    // TODO: Convert all possible native queries to use the Criteria builder
+    // Request Entries
+    tableName = "request";
+    if (dbAccessor.tableExists(tableName) &&
+      !dbAccessor.tableHasData(tableName)) {
+
+      String query;
+      if (dbType.equals(Configuration.POSTGRES_DB_NAME)) {
+        query = getPostgresRequestUpgradeQuery();
+      } else if (dbType.equals(Configuration.ORACLE_DB_NAME)) {
+        query = getOracleRequestUpgradeQuery();
+      } else {
+        query = getMysqlRequestUpgradeQuery();
+      }
+
+      dbAccessor.executeQuery(query);
+    }
+
+    // Sequences
+    if (dbAccessor.tableExists("ambari_sequences")) {
+      if (dbType.equals(Configuration.POSTGRES_DB_NAME)) {
+        try {
+          dbAccessor.executeQuery(getPostgresSequenceUpgradeQuery());
+          // Deletes
+          dbAccessor.dropSequence("host_role_command_task_id_seq");
+          dbAccessor.dropSequence("users_user_id_seq");
+          dbAccessor.dropSequence("clusters_cluster_id_seq");
+        } catch (SQLException sql) {
+          LOG.warn("Sequence update threw exception. ", sql);
+        }
+      }
+    }
+
+    //clear cache due to direct table manipulation
+    ((JpaEntityManager)em.getDelegate()).getServerSession().getIdentityMapAccessor().invalidateAll();
+
+    // Updates
+
+    // HostComponentState
+    CriteriaBuilder cb = em.getCriteriaBuilder();
+    CriteriaQuery<HostComponentStateEntity> c1 = cb.createQuery(HostComponentStateEntity.class);
+    Root<HostComponentStateEntity> hsc = c1.from(HostComponentStateEntity.class);
+    Expression<String> exp = hsc.get("current_state");
+    List<String> statuses = new ArrayList<String>() {{
+       add("STOP_FAILED");
+       add("START_FAILED");
+    }};
+    Predicate predicate = exp.in(statuses);
+    c1.select(hsc).where(predicate);
+
+    TypedQuery<HostComponentStateEntity> q1 = em.createQuery(c1);
+    List<HostComponentStateEntity> r1 = q1.getResultList();
+
+    HostComponentStateDAO hostComponentStateDAO = injector.getInstance(HostComponentStateDAO.class);
+    if (r1 != null && !r1.isEmpty()) {
+      for (HostComponentStateEntity entity : r1) {
+        entity.setCurrentState(State.INSTALLED);
+        hostComponentStateDAO.merge(entity);
+      }
+    }
+
+    // HostRoleCommand
+    CriteriaQuery<HostRoleCommandEntity> c2 = cb.createQuery(HostRoleCommandEntity.class);
+    Root<HostRoleCommandEntity> hrc = c2.from(HostRoleCommandEntity.class);
+    statuses = new ArrayList<String>() {{
+      add("PENDING");
+      add("QUEUED");
+      add("IN_PROGRESS");
+    }};
+    exp = hrc.get("status");
+    predicate = exp.in(statuses);
+    c2.select(hrc).where(predicate);
+
+    TypedQuery<HostRoleCommandEntity> q2 = em.createQuery(c2);
+    List<HostRoleCommandEntity> r2 = q2.getResultList();
+
+    HostRoleCommandDAO hostRoleCommandDAO = injector.getInstance(HostRoleCommandDAO.class);
+    if (r2 != null && !r2.isEmpty()) {
+      for (HostRoleCommandEntity entity : r2) {
+        entity.setStatus(HostRoleStatus.ABORTED);
+        hostRoleCommandDAO.merge(entity);
+      }
+    }
+
+    // Stack version changes from HDPLocal to HDP
+    stackUpgradeUtil.updateStackDetails("HDP", null);
+
+    //create cluster state entities if not present
+    executeInTransaction(new Runnable() {
+      @Override
+      public void run() {
+        ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
+        ClusterStateDAO clusterStateDAO = injector.getInstance(ClusterStateDAO.class);
+        List<ClusterEntity> clusterEntities = clusterDAO.findAll();
+        for (ClusterEntity clusterEntity : clusterEntities) {
+          if (clusterStateDAO.findByPK(clusterEntity.getClusterId()) == null) {
+            ClusterStateEntity clusterStateEntity = new ClusterStateEntity();
+            clusterStateEntity.setClusterEntity(clusterEntity);
+            clusterStateEntity.setCurrentStackVersion(clusterEntity.getDesiredStackVersion());
+
+            clusterStateDAO.create(clusterStateEntity);
+
+            clusterEntity.setClusterStateEntity(clusterStateEntity);
+
+            clusterDAO.merge(clusterEntity);
+          }
+        }
+      }
+    });
+  }
+
+  private String getPostgresServiceConfigMappingQuery() {
+    return "INSERT INTO clusterconfigmapping " +
+      "(cluster_id, type_name, version_tag, create_timestamp, selected) " +
+      "(SELECT DISTINCT cluster_id, config_type, config_tag, " +
+      "cast(date_part('epoch', now()) as bigint), 1 " +
+      "FROM serviceconfigmapping scm " +
+      "WHERE timestamp = (SELECT max(timestamp) FROM serviceconfigmapping " +
+      "WHERE cluster_id = scm.cluster_id AND config_type = scm.config_type))";
+  }
+
+  private String getPostgresSequenceUpgradeQuery() {
+    return "INSERT INTO ambari_sequences(sequence_name, \"value\") " +
+      "SELECT 'cluster_id_seq', nextval('clusters_cluster_id_seq') " +
+      "UNION ALL " +
+      "SELECT 'user_id_seq', nextval('users_user_id_seq') " +
+      "UNION ALL " +
+      "SELECT 'host_role_command_id_seq', COALESCE((SELECT max(task_id) FROM host_role_command), 1) + 50 " +
+      "UNION ALL " +
+      "SELECT 'configgroup_id_seq', 1";
+  }
+
+  private String getPostgresRequestUpgradeQuery() {
+    return "insert into request" +
+      "(request_id, cluster_id, request_context, start_time, end_time, create_time) " +
+      "(select distinct s.request_id, s.cluster_id, s.request_context, " +
+      "coalesce (cmd.start_time, -1), coalesce (cmd.end_time, -1), -1 " +
+      "from " +
+      "(select distinct request_id, cluster_id, request_context from stage ) s " +
+      "left join " +
+      "(select request_id, min(start_time) as start_time, max(end_time) " +
+      "as end_time from host_role_command group by request_id) cmd";
+  }
+
+  private String getOracleRequestUpgradeQuery() {
+    return "INSERT INTO request" +
+      "(request_id, cluster_id, request_context, start_time, end_time, create_time) " +
+      "SELECT DISTINCT s.request_id, s.cluster_id, s.request_context, " +
+      "nvl(cmd.start_time, -1), nvl(cmd.end_time, -1), -1" +
+      "FROM " +
+      "(SELECT DISTINCT request_id, cluster_id, request_context FROM stage ) s " +
+      "LEFT JOIN " +
+      "(SELECT request_id, min(start_time) as start_time, max(end_time) " +
+      "as end_time FROM host_role_command GROUP BY request_id) cmd " +
+      "ON s.request_id=cmd.request_id";
+  }
+
+  private String getMysqlRequestUpgradeQuery() {
+    return "insert into request" +
+      "(request_id, cluster_id, request_context, start_time, end_time, create_time) " +
+      "select distinct s.request_id, s.cluster_id, s.request_context, " +
+      "coalesce (cmd.start_time, -1), coalesce (cmd.end_time, -1), -1 " +
+      "from " +
+      "(select distinct request_id, cluster_id, request_context from stage ) s " +
+      "left join " +
+      "(select request_id, min(start_time) as start_time, max(end_time) " +
+      "as end_time from host_role_command group by request_id) cmd " +
+      "on s.request_id=cmd.request_id";
+  }
+
+  private void createQuartzTables() throws SQLException {
+    String dbType = getDbType();
+
+    // Run script to create quartz tables
+    String scriptPath = configuration.getResourceDirPath() +
+      File.separator + "upgrade" + File.separator + "ddl" +
+      File.separator + String.format(quartzScriptFilePattern, dbType);
+
+    try {
+      dbAccessor.executeScript(scriptPath);
+    } catch (IOException e) {
+      LOG.error("Error reading file.", e);
+    }
+
+    // TODO: Verify if this is necessary and possible
+    if (dbType.equals(Configuration.POSTGRES_DB_NAME)) {
+      grantAllPostgresPrivileges();
+    }
+  }
+
+  @Override
+  public String getTargetVersion() {
+    return "1.5.0";
+  }
+}

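For illustration, the quartz DDL script path that createQuartzTables() builds resolves as below; the resource directory and database type are example values (the "postgres" literal assumes Configuration.POSTGRES_DB_NAME is "postgres"):

    import java.io.File;

    public class QuartzScriptPathDemo {
      public static void main(String[] args) {
        // Example values, not taken from the patch:
        String resourceDir = "/var/lib/ambari-server/resources";
        String dbType = "postgres";
        String scriptPath = resourceDir + File.separator + "upgrade"
            + File.separator + "ddl" + File.separator
            + String.format("quartz.%s.sql", dbType);
        System.out.println(scriptPath);
        // -> /var/lib/ambari-server/resources/upgrade/ddl/quartz.postgres.sql
      }
    }
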
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/utils/DateUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/DateUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/DateUtils.java
index 785f4fd..aeceedf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/DateUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/DateUtils.java
@@ -94,4 +94,17 @@ public class DateUtils {
     Date now = new Date();
     return time.after(now);
   }
+
+  public static void main(String[] args) {
+    String s = "INSERT INTO ambari_sequences(sequence_name, \"value\") " +
+      "SELECT 'cluster_id_seq', nextval('clusters_cluster_id_seq') " +
+      "UNION ALL " +
+      "SELECT 'user_id_seq', nextval('users_user_id_seq') " +
+      "UNION ALL " +
+      "SELECT 'host_role_command_id_seq', COALESCE((SELECT max(task_id) FROM host_role_command), 1) + 50 " +
+      "UNION ALL " +
+      "SELECT 'configgroup_id_seq', 1";
+
+    System.out.println(s);
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/utils/VersionUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/VersionUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/VersionUtils.java
index d3df4e5..9e3c0c8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/VersionUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/VersionUtils.java
@@ -129,4 +129,18 @@ public class VersionUtils {
   public static boolean areVersionsEqual(String version1, String version2, boolean allowEmptyVersions) {
     return 0 == compareVersions(version1, version2, allowEmptyVersions);
   }
+
+  /**
+   * Return N.N.N from N.N.N.xyz
+   * @param version a version string with at least three components
+   * @return the first three components, N.N.N
+   */
+  public static String getVersionSubstring(String version) {
+    String[] versionParts = version.split("\\.");
+    if (versionParts.length < 3) {
+      throw new IllegalArgumentException("Invalid version number: " + version);
+    }
+
+    return versionParts[0] + "." + versionParts[1] + "." + versionParts[2];
+  }
 }
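
A quick usage sketch for the new helper (the inputs are example values):

    import org.apache.ambari.server.utils.VersionUtils;

    public class VersionSubstringDemo {
      public static void main(String[] args) {
        // Trims a build suffix down to the three-part version.
        System.out.println(VersionUtils.getVersionSubstring("1.3.0.123")); // 1.3.0
        // Fewer than three components is rejected:
        // VersionUtils.getVersionSubstring("1.3") -> IllegalArgumentException
      }
    }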

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/python/ambari-server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index e3439e0..931a41a 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -40,6 +40,7 @@ import tempfile
 import random
 import pwd
 from ambari_server.resourceFilesKeeper import ResourceFilesKeeper, KeeperException
+import json
 
 # debug settings
 VERBOSE = False
@@ -174,6 +175,17 @@ SECURITY_PROVIDER_KEY_CMD="{0}" + os.sep + "bin" + os.sep + "java -cp {1}" +\
                           ".MasterKeyServiceImpl {3} {4} {5} " +\
                           "> " + SERVER_OUT_FILE + " 2>&1"
 
+SCHEMA_UPGRADE_HELPER_CMD="{0}" + os.sep + "bin" + os.sep + "java -cp {1}" +\
+                          os.pathsep + "{2} " +\
+                          "org.apache.ambari.server.upgrade.SchemaUpgradeHelper" +\
+                          " {3}"
+
+STACK_UPGRADE_HELPER_CMD="{0}" + os.sep + "bin" + os.sep + "java -cp {1}" +\
+                          os.pathsep + "{2} " +\
+                          "org.apache.ambari.server.upgrade.StackUpgradeHelper" +\
+                          " {3} {4}"
+
+
 SECURITY_KEYS_DIR = "security.server.keys_dir"
 SECURITY_MASTER_KEY_LOCATION = "security.master.key.location"
 SECURITY_KEY_IS_PERSISTED = "security.master.key.ispersisted"
@@ -249,6 +261,8 @@ PG_HBA_CONF_FILE = os.path.join(PG_HBA_DIR, "pg_hba.conf")
 PG_HBA_CONF_FILE_BACKUP = os.path.join(PG_HBA_DIR, "pg_hba_bak.conf.old")
 POSTGRESQL_CONF_FILE = os.path.join(PG_HBA_DIR, "postgresql.conf")
 
+SERVER_VERSION_FILE_PATH = "server.version.file"
+
 JDBC_DATABASE_PROPERTY = "server.jdbc.database"
 JDBC_HOSTNAME_PROPERTY = "server.jdbc.hostname"
 JDBC_PORT_PROPERTY = "server.jdbc.port"
@@ -316,12 +330,6 @@ DATABASE_INIT_SCRIPTS = ['/var/lib/ambari-server/resources/Ambari-DDL-Postgres-R
 DATABASE_DROP_SCRIPTS = ['/var/lib/ambari-server/resources/Ambari-DDL-Postgres-REMOTE-DROP.sql',
                          '/var/lib/ambari-server/resources/Ambari-DDL-Oracle-DROP.sql',
                          '/var/lib/ambari-server/resources/Ambari-DDL-MySQL-DROP.sql']
-DATABASE_UPGRADE_SCRIPTS = ['/var/lib/ambari-server/resources/upgrade/ddl/Ambari-DDL-Postgres-REMOTE-UPGRADE.sql',
-                            '/var/lib/ambari-server/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql',
-                            '/var/lib/ambari-server/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql']
-DATABASE_STACK_UPGRADE_SCRIPTS = ['/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Postgres-REMOTE-UPGRADE_STACK.sql',
-                                  '/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Oracle-UPGRADE_STACK.sql',
-                                  '/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-MySQL-UPGRADE_STACK.sql']
 
 JDBC_PROPERTIES_PREFIX = "server.jdbc.properties."
 DATABASE_JDBC_PROPERTIES = [
@@ -387,15 +395,6 @@ DEFAULT_DB_NAME = "ambari"
 # stack repo upgrade
 STACK_LOCATION_KEY = 'metadata.path'
 STACK_LOCATION_DEFAULT = '/var/lib/ambari-server/resources/stacks'
-DATABASE_INSERT_METAINFO_SCRIPTS = ['/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Postgres-INSERT_METAINFO.sql',
-                                  '/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Oracle-INSERT_METAINFO.sql',
-                                  '/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-MySQL-INSERT_METAINFO.sql']
-DATABASE_FIX_LOCAL_REPO_SCRIPTS = ['/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Postgres-FIX_LOCAL_REPO.sql',
-                                  '/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Oracle-FIX_LOCAL_REPO.sql',
-                                  '/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-MySQL-FIX_LOCAL_REPO.sql']
-INSERT_METAINFO_CMD = ['su', 'postgres',
-        '--command=psql -f {0} -v metainfo_key="\'{1}\'" -v metainfo_value="\'{2}\'" -v dbname="{3}"']
-FIX_LOCAL_REPO_CMD = ['su', 'postgres', '--command=psql -f {0} -v dbname="{1}"']
 
 #Apache License Header
 ASF_LICENSE_HEADER = '''
@@ -1331,51 +1330,6 @@ def get_db_cli_tool(args):
   return None
 
 
-def remote_stack_upgrade(args, scriptPath, stackId):
-  tool = get_db_cli_tool(args)
-  if not tool:
-    args.warnings.append('{0} not found. Please, run DDL script manually'.format(DATABASE_CLI_TOOLS[DATABASE_INDEX]))
-    if VERBOSE:
-      print_warning_msg('{0} not found'.format(DATABASE_CLI_TOOLS[DATABASE_INDEX]))
-    return -1, "Client wasn't found", "Client wasn't found"
-
-  #TODO add support of other databases with scripts
-  stack_name, stack_version = stackId.split(STACK_NAME_VER_SEP)
-  if args.database == "oracle":
-    sid_or_sname = "sid"
-    if (hasattr(args, 'sid_or_sname') and args.sid_or_sname == "sname") or \
-        (hasattr(args, 'jdbc_url') and args.jdbc_url and re.match(ORACLE_SNAME_PATTERN, args.jdbc_url)):
-      print_info_msg("using SERVICE_NAME instead of SID for Oracle")
-      sid_or_sname = "service_name"
-
-    retcode, out, err = run_in_shell('{0} {1}'.format(tool, ORACLE_UPGRADE_STACK_ARGS.format(
-      args.database_username,
-      args.database_password,
-      args.database_host,
-      args.database_port,
-      args.database_name,
-      scriptPath,
-      sid_or_sname,
-      stack_name,
-      stack_version
-    )))
-    return retcode, out, err
-  elif args.database == "mysql":
-    retcode, out, err = run_in_shell('{0} {1}'.format(tool, MYSQL_UPGRADE_STACK_ARGS.format(
-      args.database_host,
-      args.database_port,
-      args.database_username,
-      args.database_password,
-      args.database_name,
-      scriptPath,
-      stack_name,
-      stack_version
-    )))
-    return retcode, out, err
-
-  return -2, "Wrong database", "Wrong database"
-  pass
-
 #execute SQL script on remote database
 def execute_remote_script(args, scriptPath):
   tool = get_db_cli_tool(args)
@@ -1427,136 +1381,6 @@ def execute_remote_script(args, scriptPath):
 
   return -2, "Wrong database", "Wrong database"
 
-def prepare_stack_upgrade_command(args, stackId):
-  db_index = DATABASE_NAMES.index(args.database)
-  tool = DATABASE_CLI_TOOLS_DESC[db_index]
-
-  scriptPath = DATABASE_STACK_UPGRADE_SCRIPTS[db_index]
-
-  stack_name, stack_version = stackId.split(STACK_NAME_VER_SEP)
-  if args.database == "oracle":
-    sid_or_sname = "sid"
-    if (hasattr(args, 'sid_or_sname') and args.sid_or_sname == "sname") or \
-      (hasattr(args, 'jdbc_url') and args.jdbc_url and re.match(ORACLE_SNAME_PATTERN, args.jdbc_url)):
-      print_info_msg("using SERVICE_NAME instead of SID for Oracle")
-      sid_or_sname = "service_name"
-
-    command = '{0} {1}'.format(tool, ORACLE_UPGRADE_STACK_ARGS.format(
-      args.database_username,
-      args.database_password,
-      args.database_host,
-      args.database_port,
-      args.database_name,
-      scriptPath,
-      sid_or_sname,
-      stack_name,
-      stack_version
-    )).strip()
-    return command
-  elif args.database == "mysql":
-    command = '{0} {1}'.format(tool, MYSQL_UPGRADE_STACK_ARGS.format(
-      args.database_host,
-      args.database_port,
-      args.database_username,
-      args.database_password,
-      args.database_name,
-      scriptPath,
-      stack_name,
-      stack_version
-    )).strip()
-    return command
-  pass
-
-
-def prepare_schema_upgrade_command(args):
-  db_index = DATABASE_NAMES.index(args.database)
-  tool = DATABASE_CLI_TOOLS_DESC[db_index]
-
-  scriptPath = DATABASE_UPGRADE_SCRIPTS[db_index]
-
-  if args.database == "postgres":
-    os.environ["PGPASSWORD"] = args.database_password
-    command = '{0} {1}'.format(tool,  POSTGRES_EXEC_ARGS.format(
-      args.database_host,
-      args.database_port,
-      args.database_name,
-      args.database_username,
-      scriptPath
-    )).strip()
-    return command
-  elif args.database == "oracle":
-    sid_or_sname = "sid"
-    if (hasattr(args, 'sid_or_sname') and args.sid_or_sname == "sname") or \
-      (hasattr(args, 'jdbc_url') and args.jdbc_url and re.match(ORACLE_SNAME_PATTERN, args.jdbc_url)):
-      print_info_msg("using SERVICE_NAME instead of SID for Oracle")
-      sid_or_sname = "service_name"
-
-    command = '{0} {1}'.format(tool, ORACLE_EXEC_ARGS.format(
-      args.database_username,
-      args.database_password,
-      args.database_host,
-      args.database_port,
-      args.database_name,
-      scriptPath,
-      sid_or_sname
-    )).strip()
-
-    return command
-  elif args.database == "mysql":
-    MYSQL_EXEC_ARGS = MYSQL_EXEC_ARGS_WO_USER_VARS if MYSQL_INIT_SCRIPT == scriptPath else MYSQL_EXEC_ARGS_WITH_USER_VARS
-    command = '{0} {1}'.format(tool, MYSQL_EXEC_ARGS.format(
-      args.database_host,
-      args.database_port,
-      args.database_username,
-      args.database_password,
-      args.database_name,
-      scriptPath
-    )).strip()
-    return command
-  pass
-
-def prepare_local_repo_upgrade_commands(args, dbkey, dbvalue):
-  db_index = DATABASE_NAMES.index(args.database)
-  tool = DATABASE_CLI_TOOLS_DESC[db_index]
-
-  scriptPath = DATABASE_INSERT_METAINFO_SCRIPTS[db_index]
-
-  command_list = []
-
-  if args.database == "oracle":
-    sid_or_sname = "sid"
-    if (hasattr(args, 'sid_or_sname') and args.sid_or_sname == "sname") or \
-      (hasattr(args, 'jdbc_url') and args.jdbc_url and re.match(ORACLE_SNAME_PATTERN, args.jdbc_url)):
-      print_info_msg("using SERVICE_NAME instead of SID for Oracle")
-      sid_or_sname = "service_name"
-
-    command_list.append('{0} {1}'.format(tool, ORACLE_UPGRADE_STACK_ARGS.format(
-      args.database_username,
-      args.database_password,
-      args.database_host,
-      args.database_port,
-      args.database_name,
-      scriptPath,
-      sid_or_sname,
-      dbkey,
-      dbvalue
-    )).strip())
-
-    command_list.append('{0} {1}'.format(tool, ORACLE_UPGRADE_STACK_ARGS.format(
-      args.database_username,
-      args.database_password,
-      args.database_host,
-      args.database_port,
-      args.database_name,
-      DATABASE_FIX_LOCAL_REPO_SCRIPTS[db_index],
-      sid_or_sname,
-      '',
-      ''
-    )).strip())
-
-
-  return command_list
-
 def configure_database_password(showDefault=True):
   passwordDefault = PG_DEFAULT_PASSWORD
   if showDefault:
@@ -1686,6 +1510,7 @@ def parse_properties_file(args):
     print_error_msg ("Error getting ambari properties")
     return -1
 
+  args.server_version_file_path = properties[SERVER_VERSION_FILE_PATH]
   args.persistence_type = properties[PERSISTENCE_TYPE_PROPERTY]
   args.jdbc_url = properties[JDBC_URL_PROPERTY]
 
@@ -2661,43 +2486,13 @@ def upgrade_stack(args, stack_id):
     raise FatalException(4, err)
   check_database_name_property()
 
-  parse_properties_file(args)
-  if args.persistence_type == "remote":
-    client_desc = DATABASE_NAMES[DATABASE_INDEX] + ' ' + DATABASE_CLI_TOOLS_DESC[DATABASE_INDEX]
-    client_usage_cmd = DATABASE_CLI_TOOLS_USAGE[DATABASE_INDEX].format(DATABASE_STACK_UPGRADE_SCRIPTS[DATABASE_INDEX], args.database_username,
-                                                                       BLIND_PASSWORD, args.database_name)
-    #TODO temporarty code
-    if not args.database in ["oracle", "mysql"]:
-      raise FatalException(-20, "Upgrade for remote database only supports Oracle.")
-
-    if get_db_cli_tool(args):
-      retcode, out, err = remote_stack_upgrade(args, DATABASE_STACK_UPGRADE_SCRIPTS[DATABASE_INDEX], stack_id)
-      if not retcode == 0:
-        raise NonFatalException(err)
+  stack_name, stack_version = stack_id.split(STACK_NAME_VER_SEP)
+  retcode = run_stack_upgrade(stack_name, stack_version)
 
-    else:
-      command = prepare_stack_upgrade_command(args, stack_id)
-      err = 'Cannot find ' + client_desc + ' client in the path to upgrade the Ambari ' + \
-            'Server stack. To upgrade stack of Ambari Server ' + \
-            'you must run the following command:' + \
-            os.linesep + command
-      args.warnings.append(err)
+  if not retcode == 0:
+    raise FatalException(retcode, 'Stack upgrade failed.')
 
-    pass
-  else:
-    #password access to ambari-server and mapred
-    configure_database_username_password(args)
-    dbname = args.database_name
-    file = args.upgrade_stack_script_file
-    stack_name, stack_version = stack_id.split(STACK_NAME_VER_SEP)
-    command = UPGRADE_STACK_CMD[:]
-    command[-1] = command[-1].format(file, stack_name, stack_version, dbname)
-    retcode, outdata, errdata = run_os_command(command)
-    if not retcode == 0:
-      raise FatalException(retcode, errdata)
-    if errdata:
-      print_warning_msg(errdata)
-    return retcode
+  return retcode
 
 def load_stack_values(version, filename):
   import xml.etree.ElementTree as ET
@@ -2719,108 +2514,6 @@ def load_stack_values(version, filename):
 
   return values
 
-def upgrade_local_repo_remote_db(args, sqlfile, dbkey, dbvalue):
-  tool = get_db_cli_tool(args)
-  if not tool:
-    # args.warnings.append('{0} not found. Please, run DDL script manually'.format(DATABASE_CLI_TOOLS[DATABASE_INDEX]))
-    if VERBOSE:
-      print_warning_msg('{0} not found'.format(DATABASE_CLI_TOOLS[DATABASE_INDEX]))
-    return -1, "Client wasn't found", "Client wasn't found"
-
-  #TODO add support of other databases with scripts
-  if args.database == "oracle":
-    sid_or_sname = "sid"
-    if (hasattr(args, 'sid_or_sname') and args.sid_or_sname == "sname") or \
-        (hasattr(args, 'jdbc_url') and args.jdbc_url and re.match(ORACLE_SNAME_PATTERN, args.jdbc_url)):
-      print_info_msg("using SERVICE_NAME instead of SID for Oracle")
-      sid_or_sname = "service_name"
-
-    retcode, out, err = run_in_shell('{0} {1}'.format(tool, ORACLE_UPGRADE_STACK_ARGS.format(
-      args.database_username,
-      args.database_password,
-      args.database_host,
-      args.database_port,
-      args.database_name,
-      sqlfile,
-      sid_or_sname,
-      dbkey,
-      dbvalue
-    )))
-
-    retcode, out, err = run_in_shell('{0} {1}'.format(tool, ORACLE_UPGRADE_STACK_ARGS.format(
-      args.database_username,
-      args.database_password,
-      args.database_host,
-      args.database_port,
-      args.database_name,
-      DATABASE_FIX_LOCAL_REPO_SCRIPTS[DATABASE_INDEX],
-      sid_or_sname,
-      '',
-      ''
-    )))
-    return retcode, out, err
-
-  return -2, "Wrong database", "Wrong database"
-  pass
-
-def upgrade_local_repo_db(args, dbkey, dbvalue):
-  if not is_root():
-    err = 'Ambari-server upgrade_local_repo_db should be run with ' \
-          'root-level privileges'
-    raise FatalException(4, err)
-  check_database_name_property()
-
-  parse_properties_file(args)
-  if args.persistence_type == "remote":
-    client_desc = DATABASE_NAMES[DATABASE_INDEX] + ' ' + DATABASE_CLI_TOOLS_DESC[DATABASE_INDEX]
-    client_usage_cmd = DATABASE_CLI_TOOLS_USAGE[DATABASE_INDEX].format(DATABASE_INSERT_METAINFO_SCRIPTS[DATABASE_INDEX], args.database_username,
-                                                                       BLIND_PASSWORD, args.database_name)
-    #TODO temporary code
-    if not args.database == "oracle":
-      raise FatalException(-20, "Upgrade for remote database only supports Oracle.")
-
-    if get_db_cli_tool(args):
-      retcode, out, err = upgrade_local_repo_remote_db(args, DATABASE_INSERT_METAINFO_SCRIPTS[DATABASE_INDEX],
-        dbkey, dbvalue)
-      if not retcode == 0:
-        raise NonFatalException(err)
-
-    else:
-      commands = prepare_local_repo_upgrade_commands(args, dbkey, dbvalue)
-      err = 'Cannot find ' + client_desc + ' client in the path to upgrade the local ' + \
-            'repo information. To upgrade local repo information. ' + \
-            'you must run the following commands:'
-      for command in commands:
-        err = err + os.linesep + command
-        pass
-      args.warnings.append(err)
-
-    pass
-  else:
-    #password access to ambari-server and mapred
-    configure_database_username_password(args)
-    dbname = args.database_name
-    sqlfile = DATABASE_INSERT_METAINFO_SCRIPTS[0]
-    command = INSERT_METAINFO_CMD[:]
-    command[-1] = command[-1].format(sqlfile, dbkey, dbvalue, dbname)
-    retcode, outdata, errdata = run_os_command(command)
-    if not retcode == 0:
-      raise FatalException(retcode, errdata)
-    if errdata:
-      print_warning_msg(errdata)
-
-    sqlfile = DATABASE_FIX_LOCAL_REPO_SCRIPTS[0]
-    command = FIX_LOCAL_REPO_CMD[:]
-    command[-1] = command[-1].format(sqlfile, dbname)
-    retcode, outdata, errdata = run_os_command(command)
-    if not retcode == 0:
-      raise FatalException(retcode, errdata)
-    if errdata:
-      print_warning_msg(errdata)
-
-    return retcode
-  pass
-
 
 def get_stack_location(properties):
   stack_location = properties[STACK_LOCATION_KEY]
@@ -2855,6 +2548,8 @@ def upgrade_local_repo(args):
     print_info_msg("Local repo file: " + repo_file_local)
     print_info_msg("Repo file: " + repo_file_local)
 
+    metainfo_update_items = {}
+
     if os.path.exists(repo_file_local) and os.path.exists(repo_file):
       local_values = load_stack_values(stack_version_local, repo_file_local)
       repo_values = load_stack_values(stack_version_local, repo_file)
@@ -2863,7 +2558,65 @@ def upgrade_local_repo(args):
           local_url = local_values[k]
           repo_url = repo_values[k]
           if repo_url != local_url:
-            upgrade_local_repo_db(args, k, local_url)
+            metainfo_update_items[k] = local_url
+
+    run_metainfo_upgrade(metainfo_update_items)
+
+
+def run_schema_upgrade(version):
+  jdk_path = find_jdk()
+  if jdk_path is None:
+    print_error_msg("No JDK found, please run the \"setup\" "
+                    "command to install a JDK automatically or install any "
+                    "JDK manually to " + JDK_INSTALL_DIR)
+    return 1
+  command = SCHEMA_UPGRADE_HELPER_CMD.format(jdk_path, get_ambari_classpath(),
+                                             get_conf_dir(), version)
+  (retcode, stdout, stderr) = run_os_command(command)
+  print_info_msg("Return code from schema upgrade command, retcode = " + str(retcode))
+  if retcode > 0:
+    print_error_msg("Error executing schema upgrade, please check the server logs.")
+  return retcode
+
+def run_stack_upgrade(stackName, stackVersion):
+  jdk_path = find_jdk()
+  if jdk_path is None:
+    print_error_msg("No JDK found, please run the \"setup\" "
+                    "command to install a JDK automatically or install any "
+                    "JDK manually to " + JDK_INSTALL_DIR)
+    return 1
+  stackId = {stackName: stackVersion}
+
+  command = STACK_UPGRADE_HELPER_CMD.format(jdk_path, get_ambari_classpath(),
+                                            get_conf_dir(), "updateStackId",
+                                            json.dumps(stackId))
+  (retcode, stdout, stderr) = run_os_command(command)
+  print_info_msg("Return code from stack upgrade command, retcode = " + str(retcode))
+  if retcode > 0:
+    print_error_msg("Error executing stack upgrade, please check the server logs.")
+  return retcode
+
+def run_metainfo_upgrade(keyValueMap=None):
+  jdk_path = find_jdk()
+  if jdk_path is None:
+    print_error_msg("No JDK found, please run the \"setup\" "
+                    "command to install a JDK automatically or install any "
+                    "JDK manually to " + JDK_INSTALL_DIR)
+    return 1
+
+  retcode = 1
+  if keyValueMap:
+    command = STACK_UPGRADE_HELPER_CMD.format(jdk_path, get_ambari_classpath(),
+                                              get_conf_dir(), 'updateMetaInfo',
+                                              json.dumps(keyValueMap))
+    (retcode, stdout, stderr) = run_os_command(command)
+    print_info_msg("Return code from stack upgrade command, retcode = " + str(retcode))
+    if retcode > 0:
+      print_error_msg("Error executing metainfo upgrade, please check the "
+                      "server logs.")
+
+  return retcode
+
 
 #
 # Upgrades the Ambari Server.
@@ -2898,58 +2651,18 @@ def upgrade(args):
       return -1
 
   parse_properties_file(args)
-  if args.persistence_type == "remote":
-    client_desc = DATABASE_NAMES[DATABASE_INDEX] + ' ' + DATABASE_CLI_TOOLS_DESC[DATABASE_INDEX]
-    client_usage_cmd = DATABASE_CLI_TOOLS_USAGE[DATABASE_INDEX].format(DATABASE_UPGRADE_SCRIPTS[DATABASE_INDEX], args.database_username,
-                                                                            BLIND_PASSWORD, args.database_name)
+  server_version = None
+  if args.server_version_file_path:
+    with open(args.server_version_file_path, 'r') as f:
+      server_version = f.read()
 
-    #TODO temporarty code
-    if not args.database in ["oracle", "mysql"]:
-      raise FatalException(-20, "Upgrade for remote database only supports Oracle.")
+  if not server_version:
+    raise FatalException(-1, 'Cannot determine server version from version '
+                             'file %s' % args.server_version_file_path)
 
-    if get_db_cli_tool(args):
-      retcode, out, err = execute_remote_script(args, DATABASE_UPGRADE_SCRIPTS[DATABASE_INDEX])
-      if not retcode == 0:
-        raise NonFatalException(err)
-
-    else:
-      command = prepare_schema_upgrade_command(args)
-      err = 'Cannot find ' + client_desc + ' client in the path to upgrade the Ambari ' + \
-            'Server schema. To upgrade Ambari Server schema ' + \
-            'you must run the following command:' + \
-            os.linesep + command
-      args.warnings.append(err)
-
-    pass
-  else:
-    print 'Checking PostgreSQL...'
-    retcode = check_postgre_up()
-    if not retcode == 0:
-      err = 'PostgreSQL server not running. Exiting'
-      raise FatalException(retcode, err)
-
-    file = args.upgrade_script_file
-    print 'Upgrading database...'
-    retcode = execute_db_script(args, file)
-    if not retcode == 0:
-      err = 'Database upgrade script has failed. Exiting.'
-      raise FatalException(retcode, err)
-
-
-    print 'Checking database integrity...'
-    check_file = file[:-3] + "Check" + file[-4:]
-    retcode = check_db_consistency(args, check_file)
-
-    if not retcode == 0:
-      print 'Found inconsistency. Trying to fix...'
-      fix_file = file[:-3] + "Fix" + file[-4:]
-      retcode = execute_db_script(args, fix_file)
-
-      if not retcode == 0:
-        err = 'Database cannot be fixed. Exiting.'
-        raise FatalException(retcode, err)
-    else:
-      print 'Database is consistent.'
+  retcode = run_schema_upgrade(server_version.strip())
+  if not retcode == 0:
+    raise FatalException(retcode, 'Schema upgrade failed.')
 
   user = read_ambari_user()
   if user is None:

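For reference, a minimal sketch of the code-driven upgrade flow the hunks above introduce, assuming the helpers defined in this script (find_jdk, get_ambari_classpath, get_conf_dir, run_os_command) and the SCHEMA_UPGRADE_HELPER_CMD / STACK_UPGRADE_HELPER_CMD templates. The version string, stack id, and repo key/URL values below are illustrative only, not taken from this commit:

    import json

    def example_code_based_upgrade():
        # Sketch only: shows how the Python side hands work to the Java helpers.
        jdk_path = find_jdk()
        classpath = get_ambari_classpath()
        conf_dir = get_conf_dir()

        # 1. Schema upgrade: the helper receives the server version string
        #    ('1.5.0' is an illustrative value).
        cmd = SCHEMA_UPGRADE_HELPER_CMD.format(jdk_path, classpath, conf_dir,
                                               '1.5.0')
        retcode, out, err = run_os_command(cmd)

        # 2. Stack id update: a one-entry JSON map of stack name -> stack
        #    version ('HDP'/'1.3.2' are made-up values).
        cmd = STACK_UPGRADE_HELPER_CMD.format(jdk_path, classpath, conf_dir,
                                              'updateStackId',
                                              json.dumps({'HDP': '1.3.2'}))
        retcode, out, err = run_os_command(cmd)

        # 3. Metainfo update: a JSON map of repo keys to local repo URLs
        #    (key and URL here are hypothetical).
        cmd = STACK_UPGRADE_HELPER_CMD.format(jdk_path, classpath, conf_dir,
                                              'updateMetaInfo',
                                              json.dumps({'repo:/centos6': 'http://local.repo/HDP'}))
        retcode, out, err = run_os_command(cmd)
        return retcode
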
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql
deleted file mode 100644
index eb4f2a3..0000000
--- a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql
+++ /dev/null
@@ -1,95 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements.  See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership.  The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-
-
--- DDL
-
---Upgrade version to current
-UPDATE metainfo SET "metainfo_value" = '${ambariVersion}' WHERE metainfo_key = 'version';
-
-ALTER TABLE hostcomponentdesiredstate ADD admin_state VARCHAR(32);
-
---1.5.0
-CREATE TABLE request (request_id BIGINT NOT NULL, cluster_id BIGINT, request_schedule_id BIGINT, command_name VARCHAR(255), create_time BIGINT NOT NULL, end_time BIGINT NOT NULL, inputs LONGTEXT, request_context VARCHAR(255), request_type VARCHAR(255), start_time BIGINT NOT NULL, status VARCHAR(255), target_component VARCHAR(255), target_hosts LONGTEXT, target_service VARCHAR(255), PRIMARY KEY (request_id));
-CREATE TABLE requestschedule (schedule_id bigint, cluster_id BIGINT NOT NULL, description varchar(255), status varchar(255), batch_separation_seconds smallint, batch_toleration_limit smallint, create_user varchar(255), create_timestamp bigint, update_user varchar(255), update_timestamp bigint, minutes varchar(10), hours varchar(10), days_of_month varchar(10), month varchar(10), day_of_week varchar(10), yearToSchedule varchar(10), startTime varchar(50), endTime varchar(50), last_execution_status varchar(255), PRIMARY KEY(schedule_id));
-CREATE TABLE requestschedulebatchrequest (schedule_id bigint, batch_id bigint, request_id bigint, request_type varchar(255), request_uri varchar(1024), request_body LONGBLOB, request_status varchar(255), return_code smallint, return_message varchar(2000), PRIMARY KEY(schedule_id, batch_id));
-
-insert into request(request_id, cluster_id, request_context, start_time, end_time, create_time)
-  select distinct s.request_id, s.cluster_id, s.request_context, coalesce (cmd.start_time, -1), coalesce (cmd.end_time, -1), -1
-  from
-    (select distinct request_id, cluster_id, request_context from stage ) s
-    left join
-    (select request_id, min(start_time) as start_time, max(end_time) as end_time from host_role_command group by request_id) cmd
-    on s.request_id=cmd.request_id;
-
-ALTER TABLE stage ADD CONSTRAINT FK_stage_request_id FOREIGN KEY (request_id) REFERENCES request (request_id);
-ALTER TABLE request ADD CONSTRAINT FK_request_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
-ALTER TABLE request ADD CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id);
-ALTER TABLE requestschedulebatchrequest ADD CONSTRAINT FK_requestschedulebatchrequest_schedule_id FOREIGN KEY (schedule_id) REFERENCES ambari.requestschedule (schedule_id);
-
-
-
---quartz tables
-CREATE TABLE QRTZ_JOB_DETAILS ( SCHED_NAME VARCHAR(120) NOT NULL, JOB_NAME  VARCHAR(200) NOT NULL, JOB_GROUP VARCHAR(200) NOT NULL, DESCRIPTION VARCHAR(250) NULL, JOB_CLASS_NAME   VARCHAR(250) NOT NULL, IS_DURABLE VARCHAR(1) NOT NULL, IS_NONCONCURRENT VARCHAR(1) NOT NULL, IS_UPDATE_DATA VARCHAR(1) NOT NULL, REQUESTS_RECOVERY VARCHAR(1) NOT NULL, JOB_DATA BLOB NULL, PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) );
-CREATE TABLE QRTZ_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, JOB_NAME  VARCHAR(200) NOT NULL, JOB_GROUP VARCHAR(200) NOT NULL, DESCRIPTION VARCHAR(250) NULL, NEXT_FIRE_TIME BIGINT(13) NULL, PREV_FIRE_TIME BIGINT(13) NULL, PRIORITY INTEGER NULL, TRIGGER_STATE VARCHAR(16) NOT NULL, TRIGGER_TYPE VARCHAR(8) NOT NULL, START_TIME BIGINT(13) NOT NULL, END_TIME BIGINT(13) NULL, CALENDAR_NAME VARCHAR(200) NULL, MISFIRE_INSTR SMALLINT(2) NULL, JOB_DATA BLOB NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP) );
-CREATE TABLE QRTZ_SIMPLE_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, REPEAT_COUNT BIGINT(7) NOT NULL, REPEAT_INTERVAL BIGINT(12) NOT NULL, TIMES_TRIGGERED BIGINT(10) NOT NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-CREATE TABLE QRTZ_CRON_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, CRON_EXPRESSION VARCHAR(200) NOT NULL, TIME_ZONE_ID VARCHAR(80), PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-CREATE TABLE QRTZ_SIMPROP_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, STR_PROP_1 VARCHAR(512) NULL, STR_PROP_2 VARCHAR(512) NULL, STR_PROP_3 VARCHAR(512) NULL, INT_PROP_1 INT NULL, INT_PROP_2 INT NULL, LONG_PROP_1 BIGINT NULL, LONG_PROP_2 BIGINT NULL, DEC_PROP_1 NUMERIC(13,4) NULL, DEC_PROP_2 NUMERIC(13,4) NULL, BOOL_PROP_1 VARCHAR(1) NULL, BOOL_PROP_2 VARCHAR(1) NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-CREATE TABLE QRTZ_BLOB_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, BLOB_DATA BLOB NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-CREATE TABLE QRTZ_CALENDARS ( SCHED_NAME VARCHAR(120) NOT NULL, CALENDAR_NAME  VARCHAR(200) NOT NULL, CALENDAR BLOB NOT NULL, PRIMARY KEY (SCHED_NAME,CALENDAR_NAME) );
-CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_GROUP  VARCHAR(200) NOT NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP) );
-CREATE TABLE QRTZ_FIRED_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, ENTRY_ID VARCHAR(95) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, INSTANCE_NAME VARCHAR(200) NOT NULL, FIRED_TIME BIGINT(13) NOT NULL, SCHED_TIME BIGINT(13) NOT NULL, PRIORITY INTEGER NOT NULL, STATE VARCHAR(16) NOT NULL, JOB_NAME VARCHAR(200) NULL, JOB_GROUP VARCHAR(200) NULL, IS_NONCONCURRENT VARCHAR(1) NULL, REQUESTS_RECOVERY VARCHAR(1) NULL, PRIMARY KEY (SCHED_NAME,ENTRY_ID) );
-CREATE TABLE QRTZ_SCHEDULER_STATE ( SCHED_NAME VARCHAR(120) NOT NULL, INSTANCE_NAME VARCHAR(200) NOT NULL, LAST_CHECKIN_TIME BIGINT(13) NOT NULL, CHECKIN_INTERVAL BIGINT(13) NOT NULL, PRIMARY KEY (SCHED_NAME,INSTANCE_NAME) );
-CREATE TABLE QRTZ_LOCKS ( SCHED_NAME VARCHAR(120) NOT NULL, LOCK_NAME  VARCHAR(40) NOT NULL, PRIMARY KEY (SCHED_NAME,LOCK_NAME) );
-
-create index idx_qrtz_j_req_recovery on QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
-create index idx_qrtz_j_grp on QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
-
-create index idx_qrtz_t_j on QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
-create index idx_qrtz_t_jg on QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
-create index idx_qrtz_t_c on QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
-create index idx_qrtz_t_g on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
-create index idx_qrtz_t_state on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
-create index idx_qrtz_t_n_state on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
-create index idx_qrtz_t_n_g_state on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
-create index idx_qrtz_t_next_fire_time on QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
-create index idx_qrtz_t_nft_st on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
-create index idx_qrtz_t_nft_misfire on QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
-create index idx_qrtz_t_nft_st_misfire on QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
-create index idx_qrtz_t_nft_st_misfire_grp on QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
-
-create index idx_qrtz_ft_trig_inst_name on QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
-create index idx_qrtz_ft_inst_job_req_rcvry on QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
-create index idx_qrtz_ft_j_g on QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
-create index idx_qrtz_ft_jg on QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
-create index idx_qrtz_ft_t_g on QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
-create index idx_qrtz_ft_tg on QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
-
-ALTER TABLE hostcomponentdesiredstate ADD passive_state VARCHAR(32) NOT NULL DEFAULT 'ACTIVE';
-ALTER TABLE servicedesiredstate ADD passive_state VARCHAR(32) NOT NULL DEFAULT 'ACTIVE';
-ALTER TABLE hoststate ADD passive_state VARCHAR(512);
-ALTER TABLE host_role_command ADD command_detail VARCHAR(255);
-ALTER TABLE host_role_command ADD custom_command_name VARCHAR(255);
-
--- blueprint related tables
-CREATE TABLE blueprint (blueprint_name VARCHAR(255) NOT NULL, stack_name VARCHAR(255) NOT NULL, stack_version VARCHAR(255) NOT NULL, PRIMARY KEY(blueprint_name));
-CREATE TABLE hostgroup (blueprint_name VARCHAR(255) NOT NULL, name VARCHAR(255) NOT NULL, cardinality VARCHAR(255) NOT NULL, PRIMARY KEY(blueprint_name, name));
-CREATE TABLE hostgroup_component (blueprint_name VARCHAR(255) NOT NULL, hostgroup_name VARCHAR(255) NOT NULL, name VARCHAR(255) NOT NULL, PRIMARY KEY(blueprint_name, hostgroup_name, name));
-
-ALTER TABLE hostgroup ADD FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name);
-ALTER TABLE hostgroup_component ADD FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup(blueprint_name, name);


[2/4] AMBARI-4716. Run Ambari Server Upgrade via code rather than DDL/DML. (mpapirkovskyy)

Posted by mp...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql
deleted file mode 100644
index 982ca6b..0000000
--- a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql
+++ /dev/null
@@ -1,134 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements.  See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership.  The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-
-
--- DDL
--- add user_name column to the tables
-ALTER TABLE clusterconfigmapping ADD (user_name VARCHAR2 (255) DEFAULT '_db');
-
-ALTER TABLE hostconfigmapping ADD (user_name VARCHAR2 (255) DEFAULT '_db');
-
-ALTER TABLE stage ADD (cluster_host_info BLOB DEFAULT NULL);
-
--- add decommission state
-ALTER TABLE hostcomponentdesiredstate ADD (admin_state VARCHAR2 (32) DEFAULT NULL);
-ALTER TABLE hostcomponentdesiredstate ADD (passive_state VARCHAR2 (32) NOT NULL DEFAULT 'ACTIVE');
-
--- DML
---Upgrade version to current
-UPDATE metainfo SET "metainfo_value" = '${ambariVersion}' WHERE "metainfo_key" = 'version';
-
-INSERT INTO ambari_sequences(sequence_name, value) values ('configgroup_id_seq', 1);
-
--- drop deprecated tables componentconfigmapping and hostcomponentconfigmapping
--- not required after Config Group implementation
---DROP TABLE componentconfigmapping;
---DROP TABLE hostcomponentconfigmapping;
-
--- required for Config Group implementation
-CREATE TABLE configgroup (group_id NUMBER(19), cluster_id NUMBER(19) NOT NULL, group_name VARCHAR2(255) NOT NULL, tag VARCHAR2(1024) NOT NULL, description VARCHAR2(1024), create_timestamp NUMBER(19) NOT NULL, PRIMARY KEY(group_id), UNIQUE(group_name));
-CREATE TABLE confgroupclusterconfigmapping (config_group_id NUMBER(19) NOT NULL, cluster_id NUMBER(19) NOT NULL, config_type VARCHAR2(255) NOT NULL, version_tag VARCHAR2(255) NOT NULL, user_name VARCHAR2(255) DEFAULT '_db', create_timestamp NUMBER(19) NOT NULL, PRIMARY KEY(config_group_id, cluster_id, config_type));
-CREATE TABLE configgrouphostmapping (config_group_id NUMBER(19) NOT NULL, host_name VARCHAR2(255) NOT NULL, PRIMARY KEY(config_group_id, host_name));
-
-ALTER TABLE configgroup ADD CONSTRAINT FK_configgroup_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
-ALTER TABLE confgroupclusterconfigmapping ADD CONSTRAINT FK_confg FOREIGN KEY (version_tag, config_type, cluster_id) REFERENCES clusterconfig (version_tag, type_name, cluster_id);
-ALTER TABLE confgroupclusterconfigmapping ADD CONSTRAINT FK_cgccm_gid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id);
-ALTER TABLE configgrouphostmapping ADD CONSTRAINT FK_cghm_cgid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id);
-ALTER TABLE configgrouphostmapping ADD CONSTRAINT FK_cghm_hname FOREIGN KEY (host_name) REFERENCES hosts (host_name);
-
--- Don't set not null constraint
--- ALTER TABLE stage MODIFY (cluster_host_info NOT NULL);
-
--- blueprint related tables
-CREATE TABLE blueprint (blueprint_name VARCHAR2(255) NOT NULL, stack_name VARCHAR2(255) NOT NULL, stack_version VARCHAR2(255) NOT NULL, PRIMARY KEY(blueprint_name));
-CREATE TABLE hostgroup (blueprint_name VARCHAR2(255) NOT NULL, name VARCHAR2(255) NOT NULL, cardinality VARCHAR2(255) NOT NULL, PRIMARY KEY(blueprint_name, name));
-CREATE TABLE hostgroup_component (blueprint_name VARCHAR2(255) NOT NULL, hostgroup_name VARCHAR2(255) NOT NULL, name VARCHAR2(255) NOT NULL, PRIMARY KEY(blueprint_name, hostgroup_name, name));
-
-ALTER TABLE hostgroup ADD FOREIGN KEY (blueprint_name) REFERENCES ambari.blueprint(blueprint_name);
-ALTER TABLE hostgroup_component ADD FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES ambari.hostgroup(blueprint_name, name);
-
--- Abort all tasks in progress due to format change
-UPDATE host_role_command SET status = 'ABORTED' WHERE status IN ('PENDING', 'QUEUED', 'IN_PROGRESS');
-
-ALTER TABLE hosts DROP COLUMN disks_info;
-
---Added end_time and structured output support to command execution result
-ALTER TABLE host_role_command ADD (end_time NUMBER(19) DEFAULT NULL);
-ALTER TABLE host_role_command ADD (structured_out BLOB DEFAULT NULL);
-ALTER TABLE host_role_command ADD (command_detail VARCHAR(255) DEFAULT NULL);
-ALTER TABLE host_role_command ADD (custom_command_name VARCHAR(255) DEFAULT NULL);
-
---1.5.0 upgrade
-CREATE TABLE request (request_id NUMBER(19) NOT NULL, cluster_id NUMBER(19), request_schedule_id NUMBER(19), command_name VARCHAR(255), create_time NUMBER(19) NOT NULL, end_time NUMBER(19) NOT NULL, inputs CLOB, request_context VARCHAR(255), request_type VARCHAR(255), start_time NUMBER(19) NOT NULL, status VARCHAR(255), target_component VARCHAR(255), target_hosts CLOB, target_service VARCHAR(255), PRIMARY KEY (request_id))
-
-INSERT INTO request(request_id, cluster_id, request_context, start_time, end_time, create_time)
-  SELECT DISTINCT s.request_id, s.cluster_id, s.request_context, nvl(cmd.start_time, -1), nvl(cmd.end_time, -1), -1
-  FROM
-    (SELECT DISTINCT request_id, cluster_id, request_context FROM stage ) s
-    LEFT JOIN
-    (SELECT request_id, min(start_time) as start_time, max(end_time) as end_time FROM host_role_command GROUP BY request_id) cmd
-    ON s.request_id=cmd.request_id;
-
-CREATE TABLE requestschedule (schedule_id NUMBER(19), cluster_id NUMBER(19) NOT NULL, description VARCHAR2(255), status VARCHAR2(255), batch_separation_seconds smallint, batch_toleration_limit smallint, create_user VARCHAR2(255), create_timestamp NUMBER(19), update_user VARCHAR2(255), update_timestamp NUMBER(19), minutes VARCHAR2(10), hours VARCHAR2(10), days_of_month VARCHAR2(10), month VARCHAR2(10), day_of_week VARCHAR2(10), yearToSchedule VARCHAR2(10), startTime VARCHAR2(50), endTime VARCHAR2(50), last_execution_status VARCHAR2(255), PRIMARY KEY(schedule_id));
-CREATE TABLE requestschedulebatchrequest (schedule_id NUMBER(19), batch_id NUMBER(19), request_id NUMBER(19), request_type VARCHAR2(255), request_uri VARCHAR2(1024), request_body BLOB, request_status VARCHAR2(255), return_code smallint, return_message VARCHAR2(2000), PRIMARY KEY(schedule_id, batch_id));
-
-ALTER TABLE stage ADD CONSTRAINT FK_stage_request_id FOREIGN KEY (request_id) REFERENCES request (request_id);
-ALTER TABLE request ADD CONSTRAINT FK_request_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
-ALTER TABLE request ADD CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id);
-ALTER TABLE requestschedulebatchrequest ADD CONSTRAINT FK_rsbatchrequest_schedule_id FOREIGN KEY (schedule_id) REFERENCES requestschedule (schedule_id)
-
---quartz tables
-CREATE TABLE qrtz_job_details ( SCHED_NAME VARCHAR2(120) NOT NULL, JOB_NAME  VARCHAR2(200) NOT NULL, JOB_GROUP VARCHAR2(200) NOT NULL, DESCRIPTION VARCHAR2(250) NULL, JOB_CLASS_NAME   VARCHAR2(250) NOT NULL, IS_DURABLE VARCHAR2(1) NOT NULL, IS_NONCONCURRENT VARCHAR2(1) NOT NULL, IS_UPDATE_DATA VARCHAR2(1) NOT NULL, REQUESTS_RECOVERY VARCHAR2(1) NOT NULL, JOB_DATA BLOB NULL, CONSTRAINT QRTZ_JOB_DETAILS_PK PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) );
-CREATE TABLE qrtz_triggers ( SCHED_NAME VARCHAR2(120) NOT NULL, TRIGGER_NAME VARCHAR2(200) NOT NULL, TRIGGER_GROUP VARCHAR2(200) NOT NULL, JOB_NAME  VARCHAR2(200) NOT NULL, JOB_GROUP VARCHAR2(200) NOT NULL, DESCRIPTION VARCHAR2(250) NULL, NEXT_FIRE_TIME NUMBER(13) NULL, PREV_FIRE_TIME NUMBER(13) NULL, PRIORITY NUMBER(13) NULL, TRIGGER_STATE VARCHAR2(16) NOT NULL, TRIGGER_TYPE VARCHAR2(8) NOT NULL, START_TIME NUMBER(13) NOT NULL, END_TIME NUMBER(13) NULL, CALENDAR_NAME VARCHAR2(200) NULL, MISFIRE_INSTR NUMBER(2) NULL, JOB_DATA BLOB NULL, CONSTRAINT QRTZ_TRIGGERS_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), CONSTRAINT QRTZ_TRIGGER_TO_JOBS_FK FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP) );
-CREATE TABLE qrtz_simple_triggers ( SCHED_NAME VARCHAR2(120) NOT NULL, TRIGGER_NAME VARCHAR2(200) NOT NULL, TRIGGER_GROUP VARCHAR2(200) NOT NULL, REPEAT_COUNT NUMBER(7) NOT NULL, REPEAT_INTERVAL NUMBER(12) NOT NULL, TIMES_TRIGGERED NUMBER(10) NOT NULL, CONSTRAINT QRTZ_SIMPLE_TRIG_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), CONSTRAINT QRTZ_SIMPLE_TRIG_TO_TRIG_FK FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-CREATE TABLE qrtz_cron_triggers ( SCHED_NAME VARCHAR2(120) NOT NULL, TRIGGER_NAME VARCHAR2(200) NOT NULL, TRIGGER_GROUP VARCHAR2(200) NOT NULL, CRON_EXPRESSION VARCHAR2(120) NOT NULL, TIME_ZONE_ID VARCHAR2(80), CONSTRAINT QRTZ_CRON_TRIG_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), CONSTRAINT QRTZ_CRON_TRIG_TO_TRIG_FK FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-CREATE TABLE qrtz_simprop_triggers ( SCHED_NAME VARCHAR2(120) NOT NULL, TRIGGER_NAME VARCHAR2(200) NOT NULL, TRIGGER_GROUP VARCHAR2(200) NOT NULL, STR_PROP_1 VARCHAR2(512) NULL, STR_PROP_2 VARCHAR2(512) NULL, STR_PROP_3 VARCHAR2(512) NULL, INT_PROP_1 NUMBER(10) NULL, INT_PROP_2 NUMBER(10) NULL, LONG_PROP_1 NUMBER(13) NULL, LONG_PROP_2 NUMBER(13) NULL, DEC_PROP_1 NUMERIC(13,4) NULL, DEC_PROP_2 NUMERIC(13,4) NULL, BOOL_PROP_1 VARCHAR2(1) NULL, BOOL_PROP_2 VARCHAR2(1) NULL, CONSTRAINT QRTZ_SIMPROP_TRIG_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), CONSTRAINT QRTZ_SIMPROP_TRIG_TO_TRIG_FK FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-CREATE TABLE qrtz_blob_triggers ( SCHED_NAME VARCHAR2(120) NOT NULL, TRIGGER_NAME VARCHAR2(200) NOT NULL, TRIGGER_GROUP VARCHAR2(200) NOT NULL, BLOB_DATA BLOB NULL, CONSTRAINT QRTZ_BLOB_TRIG_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), CONSTRAINT QRTZ_BLOB_TRIG_TO_TRIG_FK FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-CREATE TABLE qrtz_calendars ( SCHED_NAME VARCHAR2(120) NOT NULL, CALENDAR_NAME  VARCHAR2(200) NOT NULL, CALENDAR BLOB NOT NULL, CONSTRAINT QRTZ_CALENDARS_PK PRIMARY KEY (SCHED_NAME,CALENDAR_NAME) );
-CREATE TABLE qrtz_paused_trigger_grps ( SCHED_NAME VARCHAR2(120) NOT NULL, TRIGGER_GROUP  VARCHAR2(200) NOT NULL, CONSTRAINT QRTZ_PAUSED_TRIG_GRPS_PK PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP) );
-CREATE TABLE qrtz_fired_triggers ( SCHED_NAME VARCHAR2(120) NOT NULL, ENTRY_ID VARCHAR2(95) NOT NULL, TRIGGER_NAME VARCHAR2(200) NOT NULL, TRIGGER_GROUP VARCHAR2(200) NOT NULL, INSTANCE_NAME VARCHAR2(200) NOT NULL, FIRED_TIME NUMBER(13) NOT NULL, SCHED_TIME NUMBER(13) NOT NULL, PRIORITY NUMBER(13) NOT NULL, STATE VARCHAR2(16) NOT NULL, JOB_NAME VARCHAR2(200) NULL, JOB_GROUP VARCHAR2(200) NULL, IS_NONCONCURRENT VARCHAR2(1) NULL, REQUESTS_RECOVERY VARCHAR2(1) NULL, CONSTRAINT QRTZ_FIRED_TRIGGER_PK PRIMARY KEY (SCHED_NAME,ENTRY_ID) );
-CREATE TABLE qrtz_scheduler_state ( SCHED_NAME VARCHAR2(120) NOT NULL, INSTANCE_NAME VARCHAR2(200) NOT NULL, LAST_CHECKIN_TIME NUMBER(13) NOT NULL, CHECKIN_INTERVAL NUMBER(13) NOT NULL, CONSTRAINT QRTZ_SCHEDULER_STATE_PK PRIMARY KEY (SCHED_NAME,INSTANCE_NAME) );
-CREATE TABLE qrtz_locks ( SCHED_NAME VARCHAR2(120) NOT NULL, LOCK_NAME  VARCHAR2(40) NOT NULL, CONSTRAINT QRTZ_LOCKS_PK PRIMARY KEY (SCHED_NAME,LOCK_NAME) );
-
-create index idx_qrtz_j_req_recovery on qrtz_job_details(SCHED_NAME,REQUESTS_RECOVERY);
-create index idx_qrtz_j_grp on qrtz_job_details(SCHED_NAME,JOB_GROUP);
-
-create index idx_qrtz_t_j on qrtz_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
-create index idx_qrtz_t_jg on qrtz_triggers(SCHED_NAME,JOB_GROUP);
-create index idx_qrtz_t_c on qrtz_triggers(SCHED_NAME,CALENDAR_NAME);
-create index idx_qrtz_t_g on qrtz_triggers(SCHED_NAME,TRIGGER_GROUP);
-create index idx_qrtz_t_state on qrtz_triggers(SCHED_NAME,TRIGGER_STATE);
-create index idx_qrtz_t_n_state on qrtz_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
-create index idx_qrtz_t_n_g_state on qrtz_triggers(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
-create index idx_qrtz_t_next_fire_time on qrtz_triggers(SCHED_NAME,NEXT_FIRE_TIME);
-create index idx_qrtz_t_nft_st on qrtz_triggers(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
-create index idx_qrtz_t_nft_misfire on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
-create index idx_qrtz_t_nft_st_misfire on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
-create index idx_qrtz_t_nft_st_misfire_grp on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
-
-create index idx_qrtz_ft_trig_inst_name on qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME);
-create index idx_qrtz_ft_inst_job_req_rcvry on qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
-create index idx_qrtz_ft_j_g on qrtz_fired_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
-create index idx_qrtz_ft_jg on qrtz_fired_triggers(SCHED_NAME,JOB_GROUP);
-create index idx_qrtz_ft_t_g on qrtz_fired_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
-create index idx_qrtz_ft_tg on qrtz_fired_triggers(SCHED_NAME,TRIGGER_GROUP);
-
-ALTER TABLE hoststate ADD (passive_state VARCHAR2(512) DEFAULT NULL);
-ALTER TABLE servicedesiredstate ADD (passive_state VARCHAR2(32) NOT NULL DEFAULT 'ACTIVE');
-
-commit;

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
deleted file mode 100644
index faa439b..0000000
--- a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
+++ /dev/null
@@ -1,250 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements.  See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership.  The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-\connect :dbname;
-
--- service to cluster level config mappings move. idempotent update
-CREATE LANGUAGE plpgsql;
-
-CREATE OR REPLACE FUNCTION update_clusterconfigmapping()
-  RETURNS void AS
-$_$
-BEGIN
-
-IF NOT EXISTS (
-  SELECT *
-  FROM   pg_catalog.pg_tables
-  WHERE  schemaname = 'ambari'
-  AND    tablename  = 'clusterconfigmapping'
-  )
-  THEN
-
-    CREATE TABLE ambari.clusterconfigmapping (cluster_id bigint NOT NULL, type_name VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, create_timestamp BIGINT NOT NULL, selected INTEGER NOT NULL DEFAULT 0, user_name VARCHAR(255) NOT NULL DEFAULT '_db', PRIMARY KEY (cluster_id, type_name, create_timestamp));
-    ALTER TABLE ambari.clusterconfigmapping ADD CONSTRAINT FK_clusterconfigmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
-    INSERT INTO ambari.clusterconfigmapping(cluster_id, type_name, version_tag, create_timestamp, selected)
-      (SELECT DISTINCT cluster_id, config_type, config_tag, cast(date_part('epoch', now()) as bigint), 1
-        FROM ambari.serviceconfigmapping scm
-        WHERE timestamp = (SELECT max(timestamp) FROM ambari.serviceconfigmapping WHERE cluster_id = scm.cluster_id AND config_type = scm.config_type));
-    DELETE FROM ambari.serviceconfigmapping;
-
-END IF;
-
-END;
-$_$ LANGUAGE plpgsql;
-
--- Upgrade from 1.2.0
-ALTER TABLE ambari.hosts
-  ADD COLUMN ph_cpu_count INTEGER,
-  ALTER COLUMN disks_info TYPE VARCHAR(32000);
-
--- Upgrade to 1.3.0
-
--- setting run-time search_path for :username
-ALTER SCHEMA ambari OWNER TO :username;
-ALTER ROLE :username SET search_path to 'ambari';
-
---updating clusterstate table
-ALTER TABLE ambari.clusterstate
-  ADD COLUMN current_stack_version VARCHAR(255) NOT NULL;
-
---updating hostconfigmapping table
-ALTER TABLE ambari.hostconfigmapping
-  ADD COLUMN user_name VARCHAR(255) NOT NULL DEFAULT '_db';
-CREATE TABLE ambari.hostconfigmapping (cluster_id bigint NOT NULL, host_name VARCHAR(255) NOT NULL, type_name VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, service_name VARCHAR(255), create_timestamp BIGINT NOT NULL, selected INTEGER NOT NULL DEFAULT 0, PRIMARY KEY (cluster_id, host_name, type_name, create_timestamp));
-GRANT ALL PRIVILEGES ON TABLE ambari.hostconfigmapping TO :username;
-ALTER TABLE ambari.hostconfigmapping ADD CONSTRAINT FK_hostconfigmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
-ALTER TABLE ambari.hostconfigmapping ADD CONSTRAINT FK_hostconfigmapping_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
-
---updating stage table
-ALTER TABLE ambari.stage ADD COLUMN request_context VARCHAR(255);
-ALTER TABLE ambari.stage ADD COLUMN cluster_host_info BYTEA;
-
--- portability changes for MySQL/Oracle support
-ALTER TABLE ambari.hostcomponentdesiredconfigmapping rename to hcdesiredconfigmapping;
-ALTER TABLE ambari.users ALTER column user_id DROP DEFAULT;
-ALTER TABLE ambari.users ALTER column ldap_user TYPE INTEGER USING CASE WHEN ldap_user=true THEN 1 ELSE 0 END;
-
---creating ambari_sequences table instead of deprecated sequences
-CREATE TABLE ambari.ambari_sequences (sequence_name VARCHAR(255) PRIMARY KEY, "value" BIGINT NOT NULL);
-GRANT ALL PRIVILEGES ON TABLE ambari.ambari_sequences TO :username;
-
-INSERT INTO ambari.ambari_sequences(sequence_name, "value")
-  SELECT 'cluster_id_seq', nextval('ambari.clusters_cluster_id_seq')
-  UNION ALL
-  SELECT 'user_id_seq', nextval('ambari.users_user_id_seq')
-  UNION ALL
-  SELECT 'host_role_command_id_seq', COALESCE((SELECT max(task_id) FROM ambari.host_role_command), 1) + 50
-  UNION ALL
-  SELECT 'configgroup_id_seq', 1;
-
-DROP sequence ambari.host_role_command_task_id_seq;
-DROP sequence ambari.users_user_id_seq;
-DROP sequence ambari.clusters_cluster_id_seq;
-
---updating metainfo table
-CREATE TABLE ambari.metainfo (metainfo_key VARCHAR(255), metainfo_value VARCHAR, PRIMARY KEY(metainfo_key));
-INSERT INTO ambari.metainfo (metainfo_key, metainfo_value) SELECT 'version', '${ambariVersion}';
-UPDATE ambari.metainfo SET metainfo_value = '${ambariVersion}' WHERE metainfo_key = 'version';
-GRANT ALL PRIVILEGES ON TABLE ambari.metainfo TO :username;
-
---replacing deprecated STOP_FAILED and START_FAILED states with INSTALLED
-UPDATE ambari.hostcomponentstate SET current_state = 'INSTALLED' WHERE current_state LIKE 'STOP_FAILED';
-UPDATE ambari.hostcomponentstate SET current_state = 'INSTALLED' WHERE current_state LIKE 'START_FAILED';
-
---updating clusterconfigmapping table
-ALTER TABLE ambari.clusterconfigmapping
-  ADD COLUMN user_name VARCHAR(255) NOT NULL DEFAULT '_db';
-SELECT update_clusterconfigmapping();
-GRANT ALL PRIVILEGES ON TABLE ambari.clusterconfigmapping TO :username;
-
--- drop deprecated tables componentconfigmapping and hostcomponentconfigmapping
--- not required after Config Group implementation
---DROP TABLE componentconfigmapping;
---DROP TABLE hostcomponentconfigmapping;
-
--- required for Config Group implementation
-CREATE TABLE ambari.configgroup (group_id BIGINT, cluster_id BIGINT NOT NULL, group_name VARCHAR(255) NOT NULL, tag VARCHAR(1024) NOT NULL, description VARCHAR(1024), create_timestamp BIGINT NOT NULL, PRIMARY KEY(group_id), UNIQUE(group_name));
-GRANT ALL PRIVILEGES ON TABLE ambari.configgroup TO :username;
-
-CREATE TABLE ambari.confgroupclusterconfigmapping (config_group_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, config_type VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, user_name VARCHAR(255) DEFAULT '_db', create_timestamp BIGINT NOT NULL, PRIMARY KEY(config_group_id, cluster_id, config_type));
-GRANT ALL PRIVILEGES ON TABLE ambari.confgroupclusterconfigmapping TO :username;
-
-CREATE TABLE ambari.configgrouphostmapping (config_group_id BIGINT NOT NULL, host_name VARCHAR(255) NOT NULL, PRIMARY KEY(config_group_id, host_name));
-GRANT ALL PRIVILEGES ON TABLE ambari.configgrouphostmapping TO :username;
-
-ALTER TABLE ambari.configgroup ADD CONSTRAINT FK_configgroup_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
-ALTER TABLE ambari.confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclusterconfigmapping_config_tag FOREIGN KEY (version_tag, config_type, cluster_id) REFERENCES ambari.clusterconfig (version_tag, type_name, cluster_id);
-ALTER TABLE ambari.confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclusterconfigmapping_group_id FOREIGN KEY (config_group_id) REFERENCES ambari.configgroup (group_id);
-ALTER TABLE ambari.configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_configgroup_id FOREIGN KEY (config_group_id) REFERENCES ambari.configgroup (group_id);
-ALTER TABLE ambari.configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
-
--- create blueprint tables
-CREATE TABLE ambari.blueprint (blueprint_name VARCHAR(255) NOT NULL, stack_name VARCHAR(255) NOT NULL, stack_version VARCHAR(255) NOT NULL, PRIMARY KEY(blueprint_name));
-CREATE TABLE ambari.hostgroup (blueprint_name VARCHAR(255) NOT NULL, name VARCHAR(255) NOT NULL, cardinality VARCHAR(255) NOT NULL, PRIMARY KEY(blueprint_name, name));
-CREATE TABLE ambari.hostgroup_component (blueprint_name VARCHAR(255) NOT NULL, hostgroup_name VARCHAR(255) NOT NULL, name VARCHAR(255) NOT NULL, PRIMARY KEY(blueprint_name, hostgroup_name, name));
-GRANT ALL PRIVILEGES ON TABLE ambari.blueprint TO :username;
-GRANT ALL PRIVILEGES ON TABLE ambari.hostgroup TO :username;
-GRANT ALL PRIVILEGES ON TABLE ambari.hostgroup_component TO :username
--- add fk constraints to blueprint tables
-ALTER TABLE ambari.hostgroup ADD FOREIGN KEY (blueprint_name) REFERENCES ambari.blueprint(blueprint_name);
-ALTER TABLE ambari.hostgroup_component ADD FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES ambari.hostgroup(blueprint_name, name);
-
-
--- add decommission state
-ALTER TABLE ambari.hostcomponentdesiredstate ADD COLUMN admin_state VARCHAR(32);
-ALTER TABLE ambari.hostcomponentdesiredstate ADD COLUMN passive_state VARCHAR(32) NOT NULL DEFAULT 'ACTIVE'
-
--- Set cluster_host_info column mandatory -- disabled due to update logic change
--- ALTER TABLE ambari.stage ALTER COLUMN cluster_host_info SET NOT NULL;
-
-UPDATE ambari.host_role_command SET status = 'ABORTED' WHERE status IN ('PENDING', 'QUEUED', 'IN_PROGRESS');
-
-ALTER TABLE ambari.hosts DROP COLUMN disks_info;
-
---Added end_time and structured output support to command execution result
-ALTER TABLE ambari.host_role_command ADD COLUMN end_time BIGINT;
-ALTER TABLE ambari.host_role_command ADD COLUMN structured_out BYTEA;
-ALTER TABLE ambari.host_role_command ADD COLUMN command_detail VARCHAR(255);
-ALTER TABLE ambari.host_role_command ADD COLUMN custom_command_name VARCHAR(255);
-
---1.5.0 upgrade
-
-CREATE TABLE ambari.request (request_id BIGINT NOT NULL, cluster_id BIGINT, command_name VARCHAR(255), create_time BIGINT NOT NULL, end_time BIGINT NOT NULL, inputs VARCHAR(32000), request_context VARCHAR(255), request_type VARCHAR(255), request_schedule_id BIGINT, start_time BIGINT NOT NULL, status VARCHAR(255), target_component VARCHAR(255), target_hosts TEXT, target_service VARCHAR(255), PRIMARY KEY (request_id));
-GRANT ALL PRIVILEGES ON TABLE ambari.request TO :username;
-
---insert request data
-begin;
-insert into ambari.request(request_id, cluster_id, request_context, start_time, end_time, create_time) (
-  select distinct s.request_id, s.cluster_id, s.request_context, coalesce (cmd.start_time, -1), coalesce (cmd.end_time, -1), -1
-  from
-    (select distinct request_id, cluster_id, request_context from ambari.stage ) s
-    left join
-    (select request_id, min(start_time) as start_time, max(end_time) as end_time from ambari.host_role_command group by request_id) cmd
-    on s.request_id=cmd.request_id
-);
-
-commit;
-
-CREATE TABLE ambari.requestschedule (schedule_id bigint, cluster_id bigint NOT NULL, description varchar(255), status varchar(255), batch_separation_seconds smallint, batch_toleration_limit smallint, create_user varchar(255), create_timestamp bigint, update_user varchar(255), update_timestamp bigint, minutes varchar(10), hours varchar(10), days_of_month varchar(10), month varchar(10), day_of_week varchar(10), yearToSchedule varchar(10), startTime varchar(50), endTime varchar(50), last_execution_status varchar(255), PRIMARY KEY(schedule_id));
-GRANT ALL PRIVILEGES ON TABLE ambari.requestschedule TO :username;
-
-CREATE TABLE ambari.requestschedulebatchrequest (schedule_id bigint, batch_id bigint, request_id bigint, request_type varchar(255), request_uri varchar(1024), request_body BYTEA, request_status varchar(255), return_code smallint, return_message varchar(20000), PRIMARY KEY(schedule_id, batch_id));
-GRANT ALL PRIVILEGES ON TABLE ambari.requestschedulebatchrequest TO :username;
-
-ALTER TABLE ambari.stage ADD CONSTRAINT FK_stage_request_id FOREIGN KEY (request_id) REFERENCES ambari.request (request_id);
-ALTER TABLE ambari.request ADD CONSTRAINT FK_request_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
-ALTER TABLE ambari.request ADD CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES ambari.requestschedule (schedule_id);
-
---Create quartz tables for schedule manager
-CREATE TABLE ambari.qrtz_job_details ( SCHED_NAME VARCHAR(120) NOT NULL, JOB_NAME  VARCHAR(200) NOT NULL, JOB_GROUP VARCHAR(200) NOT NULL, DESCRIPTION VARCHAR(250) NULL, JOB_CLASS_NAME   VARCHAR(250) NOT NULL, IS_DURABLE BOOL NOT NULL, IS_NONCONCURRENT BOOL NOT NULL, IS_UPDATE_DATA BOOL NOT NULL, REQUESTS_RECOVERY BOOL NOT NULL, JOB_DATA BYTEA NULL, PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) );
-GRANT ALL PRIVILEGES ON TABLE ambari.qrtz_job_details TO :username;
-
-CREATE TABLE ambari.qrtz_triggers ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, JOB_NAME  VARCHAR(200) NOT NULL, JOB_GROUP VARCHAR(200) NOT NULL, DESCRIPTION VARCHAR(250) NULL, NEXT_FIRE_TIME BIGINT NULL, PREV_FIRE_TIME BIGINT NULL, PRIORITY INTEGER NULL, TRIGGER_STATE VARCHAR(16) NOT NULL, TRIGGER_TYPE VARCHAR(8) NOT NULL, START_TIME BIGINT NOT NULL, END_TIME BIGINT NULL, CALENDAR_NAME VARCHAR(200) NULL, MISFIRE_INSTR SMALLINT NULL, JOB_DATA BYTEA NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) REFERENCES ambari.QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP) );
-GRANT ALL PRIVILEGES ON TABLE ambari.qrtz_triggers TO :username;
-
-CREATE TABLE ambari.qrtz_simple_triggers ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, REPEAT_COUNT BIGINT NOT NULL, REPEAT_INTERVAL BIGINT NOT NULL, TIMES_TRIGGERED BIGINT NOT NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES ambari.QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-GRANT ALL PRIVILEGES ON TABLE ambari.qrtz_simple_triggers TO :username;
-
-CREATE TABLE ambari.qrtz_cron_triggers ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, CRON_EXPRESSION VARCHAR(120) NOT NULL, TIME_ZONE_ID VARCHAR(80), PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES ambari.QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-GRANT ALL PRIVILEGES ON TABLE ambari.qrtz_cron_triggers TO :username;
-
-CREATE TABLE ambari.qrtz_simprop_triggers ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, STR_PROP_1 VARCHAR(512) NULL, STR_PROP_2 VARCHAR(512) NULL, STR_PROP_3 VARCHAR(512) NULL, INT_PROP_1 INT NULL, INT_PROP_2 INT NULL, LONG_PROP_1 BIGINT NULL, LONG_PROP_2 BIGINT NULL, DEC_PROP_1 NUMERIC(13,4) NULL, DEC_PROP_2 NUMERIC(13,4) NULL, BOOL_PROP_1 BOOL NULL, BOOL_PROP_2 BOOL NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES ambari.QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-GRANT ALL PRIVILEGES ON TABLE ambari.qrtz_simprop_triggers TO :username;
-CREATE TABLE ambari.qrtz_blob_triggers ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, BLOB_DATA BYTEA NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES ambari.QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
-GRANT ALL PRIVILEGES ON TABLE ambari.qrtz_blob_triggers TO :username;
-
-CREATE TABLE ambari.qrtz_calendars ( SCHED_NAME VARCHAR(120) NOT NULL, CALENDAR_NAME  VARCHAR(200) NOT NULL, CALENDAR BYTEA NOT NULL, PRIMARY KEY (SCHED_NAME,CALENDAR_NAME) );
-GRANT ALL PRIVILEGES ON TABLE ambari.qrtz_calendars TO :username;
-
-
-CREATE TABLE ambari.qrtz_paused_trigger_grps ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_GROUP  VARCHAR(200) NOT NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP) );
-GRANT ALL PRIVILEGES ON TABLE ambari.qrtz_paused_trigger_grps TO :username;
-
-CREATE TABLE ambari.qrtz_fired_triggers ( SCHED_NAME VARCHAR(120) NOT NULL, ENTRY_ID VARCHAR(95) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, INSTANCE_NAME VARCHAR(200) NOT NULL, FIRED_TIME BIGINT NOT NULL, SCHED_TIME BIGINT NOT NULL, PRIORITY INTEGER NOT NULL, STATE VARCHAR(16) NOT NULL, JOB_NAME VARCHAR(200) NULL, JOB_GROUP VARCHAR(200) NULL, IS_NONCONCURRENT BOOL NULL, REQUESTS_RECOVERY BOOL NULL, PRIMARY KEY (SCHED_NAME,ENTRY_ID) );
-GRANT ALL PRIVILEGES ON TABLE ambari.qrtz_fired_triggers TO :username;
-
-CREATE TABLE ambari.qrtz_scheduler_state ( SCHED_NAME VARCHAR(120) NOT NULL, INSTANCE_NAME VARCHAR(200) NOT NULL, LAST_CHECKIN_TIME BIGINT NOT NULL, CHECKIN_INTERVAL BIGINT NOT NULL, PRIMARY KEY (SCHED_NAME,INSTANCE_NAME) );
-GRANT ALL PRIVILEGES ON TABLE ambari.qrtz_scheduler_state TO :username;
-
-CREATE TABLE ambari.qrtz_locks (SCHED_NAME VARCHAR(120) NOT NULL, LOCK_NAME  VARCHAR(40) NOT NULL, PRIMARY KEY (SCHED_NAME,LOCK_NAME));
-GRANT ALL PRIVILEGES ON TABLE ambari.qrtz_locks TO :username;
-
-create index idx_qrtz_j_req_recovery on ambari.qrtz_job_details(SCHED_NAME,REQUESTS_RECOVERY);
-create index idx_qrtz_j_grp on ambari.qrtz_job_details(SCHED_NAME,JOB_GROUP);
-
-create index idx_qrtz_t_j on ambari.qrtz_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
-create index idx_qrtz_t_jg on ambari.qrtz_triggers(SCHED_NAME,JOB_GROUP);
-create index idx_qrtz_t_c on ambari.qrtz_triggers(SCHED_NAME,CALENDAR_NAME);
-create index idx_qrtz_t_g on ambari.qrtz_triggers(SCHED_NAME,TRIGGER_GROUP);
-create index idx_qrtz_t_state on ambari.qrtz_triggers(SCHED_NAME,TRIGGER_STATE);
-create index idx_qrtz_t_n_state on ambari.qrtz_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
-create index idx_qrtz_t_n_g_state on ambari.qrtz_triggers(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
-create index idx_qrtz_t_next_fire_time on ambari.qrtz_triggers(SCHED_NAME,NEXT_FIRE_TIME);
-create index idx_qrtz_t_nft_st on ambari.qrtz_triggers(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
-create index idx_qrtz_t_nft_misfire on ambari.qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
-create index idx_qrtz_t_nft_st_misfire on ambari.qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
-create index idx_qrtz_t_nft_st_misfire_grp on ambari.qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
-
-create index idx_qrtz_ft_trig_inst_name on ambari.qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME);
-create index idx_qrtz_ft_inst_job_req_rcvry on ambari.qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
-create index idx_qrtz_ft_j_g on ambari.qrtz_fired_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
-create index idx_qrtz_ft_jg on ambari.qrtz_fired_triggers(SCHED_NAME,JOB_GROUP);
-create index idx_qrtz_ft_t_g on ambari.qrtz_fired_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
-create index idx_qrtz_ft_tg on ambari.qrtz_fired_triggers(SCHED_NAME,TRIGGER_GROUP);
-
-ALTER TABLE ambari.hoststate ADD COLUMN passive_state VARCHAR(512);
-ALTER TABLE ambari.servicedesiredstate ADD COLUMN passive_state VARCHAR(32) NOT NULL DEFAULT 'ACTIVE';

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/resources/upgrade/ddl/quartz.mysql.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/quartz.mysql.sql b/ambari-server/src/main/resources/upgrade/ddl/quartz.mysql.sql
new file mode 100644
index 0000000..b596f3b
--- /dev/null
+++ b/ambari-server/src/main/resources/upgrade/ddl/quartz.mysql.sql
@@ -0,0 +1,53 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+--quartz tables
+CREATE TABLE QRTZ_JOB_DETAILS ( SCHED_NAME VARCHAR(120) NOT NULL, JOB_NAME  VARCHAR(200) NOT NULL, JOB_GROUP VARCHAR(200) NOT NULL, DESCRIPTION VARCHAR(250) NULL, JOB_CLASS_NAME   VARCHAR(250) NOT NULL, IS_DURABLE VARCHAR(1) NOT NULL, IS_NONCONCURRENT VARCHAR(1) NOT NULL, IS_UPDATE_DATA VARCHAR(1) NOT NULL, REQUESTS_RECOVERY VARCHAR(1) NOT NULL, JOB_DATA BLOB NULL, PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) );
+CREATE TABLE QRTZ_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, JOB_NAME  VARCHAR(200) NOT NULL, JOB_GROUP VARCHAR(200) NOT NULL, DESCRIPTION VARCHAR(250) NULL, NEXT_FIRE_TIME BIGINT(13) NULL, PREV_FIRE_TIME BIGINT(13) NULL, PRIORITY INTEGER NULL, TRIGGER_STATE VARCHAR(16) NOT NULL, TRIGGER_TYPE VARCHAR(8) NOT NULL, START_TIME BIGINT(13) NOT NULL, END_TIME BIGINT(13) NULL, CALENDAR_NAME VARCHAR(200) NULL, MISFIRE_INSTR SMALLINT(2) NULL, JOB_DATA BLOB NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP) );
+CREATE TABLE QRTZ_SIMPLE_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, REPEAT_COUNT BIGINT(7) NOT NULL, REPEAT_INTERVAL BIGINT(12) NOT NULL, TIMES_TRIGGERED BIGINT(10) NOT NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
+CREATE TABLE QRTZ_CRON_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, CRON_EXPRESSION VARCHAR(200) NOT NULL, TIME_ZONE_ID VARCHAR(80), PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
+CREATE TABLE QRTZ_SIMPROP_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, STR_PROP_1 VARCHAR(512) NULL, STR_PROP_2 VARCHAR(512) NULL, STR_PROP_3 VARCHAR(512) NULL, INT_PROP_1 INT NULL, INT_PROP_2 INT NULL, LONG_PROP_1 BIGINT NULL, LONG_PROP_2 BIGINT NULL, DEC_PROP_1 NUMERIC(13,4) NULL, DEC_PROP_2 NUMERIC(13,4) NULL, BOOL_PROP_1 VARCHAR(1) NULL, BOOL_PROP_2 VARCHAR(1) NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
+CREATE TABLE QRTZ_BLOB_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, BLOB_DATA BLOB NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
+CREATE TABLE QRTZ_CALENDARS ( SCHED_NAME VARCHAR(120) NOT NULL, CALENDAR_NAME  VARCHAR(200) NOT NULL, CALENDAR BLOB NOT NULL, PRIMARY KEY (SCHED_NAME,CALENDAR_NAME) );
+CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_GROUP  VARCHAR(200) NOT NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP) );
+CREATE TABLE QRTZ_FIRED_TRIGGERS ( SCHED_NAME VARCHAR(120) NOT NULL, ENTRY_ID VARCHAR(95) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, INSTANCE_NAME VARCHAR(200) NOT NULL, FIRED_TIME BIGINT(13) NOT NULL, SCHED_TIME BIGINT(13) NOT NULL, PRIORITY INTEGER NOT NULL, STATE VARCHAR(16) NOT NULL, JOB_NAME VARCHAR(200) NULL, JOB_GROUP VARCHAR(200) NULL, IS_NONCONCURRENT VARCHAR(1) NULL, REQUESTS_RECOVERY VARCHAR(1) NULL, PRIMARY KEY (SCHED_NAME,ENTRY_ID) );
+CREATE TABLE QRTZ_SCHEDULER_STATE ( SCHED_NAME VARCHAR(120) NOT NULL, INSTANCE_NAME VARCHAR(200) NOT NULL, LAST_CHECKIN_TIME BIGINT(13) NOT NULL, CHECKIN_INTERVAL BIGINT(13) NOT NULL, PRIMARY KEY (SCHED_NAME,INSTANCE_NAME) );
+CREATE TABLE QRTZ_LOCKS ( SCHED_NAME VARCHAR(120) NOT NULL, LOCK_NAME  VARCHAR(40) NOT NULL, PRIMARY KEY (SCHED_NAME,LOCK_NAME) );
+
+create index idx_qrtz_j_req_recovery on QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_j_grp on QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
+
+create index idx_qrtz_t_j on QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_t_jg on QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_t_c on QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
+create index idx_qrtz_t_g on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
+create index idx_qrtz_t_state on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
+create index idx_qrtz_t_n_state on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_n_g_state on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_next_fire_time on QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_misfire on QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st_misfire on QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
+create index idx_qrtz_t_nft_st_misfire_grp on QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
+
+create index idx_qrtz_ft_trig_inst_name on QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
+create index idx_qrtz_ft_inst_job_req_rcvry on QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_ft_j_g on QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_ft_jg on QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_ft_t_g on QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
+create index idx_qrtz_ft_tg on QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/resources/upgrade/ddl/quartz.oracle.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/quartz.oracle.sql b/ambari-server/src/main/resources/upgrade/ddl/quartz.oracle.sql
new file mode 100644
index 0000000..bdde07e
--- /dev/null
+++ b/ambari-server/src/main/resources/upgrade/ddl/quartz.oracle.sql
@@ -0,0 +1,55 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+--quartz tables
+CREATE TABLE qrtz_job_details ( SCHED_NAME VARCHAR2(120) NOT NULL, JOB_NAME  VARCHAR2(200) NOT NULL, JOB_GROUP VARCHAR2(200) NOT NULL, DESCRIPTION VARCHAR2(250) NULL, JOB_CLASS_NAME   VARCHAR2(250) NOT NULL, IS_DURABLE VARCHAR2(1) NOT NULL, IS_NONCONCURRENT VARCHAR2(1) NOT NULL, IS_UPDATE_DATA VARCHAR2(1) NOT NULL, REQUESTS_RECOVERY VARCHAR2(1) NOT NULL, JOB_DATA BLOB NULL, CONSTRAINT QRTZ_JOB_DETAILS_PK PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) );
+CREATE TABLE qrtz_triggers ( SCHED_NAME VARCHAR2(120) NOT NULL, TRIGGER_NAME VARCHAR2(200) NOT NULL, TRIGGER_GROUP VARCHAR2(200) NOT NULL, JOB_NAME  VARCHAR2(200) NOT NULL, JOB_GROUP VARCHAR2(200) NOT NULL, DESCRIPTION VARCHAR2(250) NULL, NEXT_FIRE_TIME NUMBER(13) NULL, PREV_FIRE_TIME NUMBER(13) NULL, PRIORITY NUMBER(13) NULL, TRIGGER_STATE VARCHAR2(16) NOT NULL, TRIGGER_TYPE VARCHAR2(8) NOT NULL, START_TIME NUMBER(13) NOT NULL, END_TIME NUMBER(13) NULL, CALENDAR_NAME VARCHAR2(200) NULL, MISFIRE_INSTR NUMBER(2) NULL, JOB_DATA BLOB NULL, CONSTRAINT QRTZ_TRIGGERS_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), CONSTRAINT QRTZ_TRIGGER_TO_JOBS_FK FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP) );
+CREATE TABLE qrtz_simple_triggers ( SCHED_NAME VARCHAR2(120) NOT NULL, TRIGGER_NAME VARCHAR2(200) NOT NULL, TRIGGER_GROUP VARCHAR2(200) NOT NULL, REPEAT_COUNT NUMBER(7) NOT NULL, REPEAT_INTERVAL NUMBER(12) NOT NULL, TIMES_TRIGGERED NUMBER(10) NOT NULL, CONSTRAINT QRTZ_SIMPLE_TRIG_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), CONSTRAINT QRTZ_SIMPLE_TRIG_TO_TRIG_FK FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
+CREATE TABLE qrtz_cron_triggers ( SCHED_NAME VARCHAR2(120) NOT NULL, TRIGGER_NAME VARCHAR2(200) NOT NULL, TRIGGER_GROUP VARCHAR2(200) NOT NULL, CRON_EXPRESSION VARCHAR2(120) NOT NULL, TIME_ZONE_ID VARCHAR2(80), CONSTRAINT QRTZ_CRON_TRIG_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), CONSTRAINT QRTZ_CRON_TRIG_TO_TRIG_FK FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
+CREATE TABLE qrtz_simprop_triggers ( SCHED_NAME VARCHAR2(120) NOT NULL, TRIGGER_NAME VARCHAR2(200) NOT NULL, TRIGGER_GROUP VARCHAR2(200) NOT NULL, STR_PROP_1 VARCHAR2(512) NULL, STR_PROP_2 VARCHAR2(512) NULL, STR_PROP_3 VARCHAR2(512) NULL, INT_PROP_1 NUMBER(10) NULL, INT_PROP_2 NUMBER(10) NULL, LONG_PROP_1 NUMBER(13) NULL, LONG_PROP_2 NUMBER(13) NULL, DEC_PROP_1 NUMERIC(13,4) NULL, DEC_PROP_2 NUMERIC(13,4) NULL, BOOL_PROP_1 VARCHAR2(1) NULL, BOOL_PROP_2 VARCHAR2(1) NULL, CONSTRAINT QRTZ_SIMPROP_TRIG_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), CONSTRAINT QRTZ_SIMPROP_TRIG_TO_TRIG_FK FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
+CREATE TABLE qrtz_blob_triggers ( SCHED_NAME VARCHAR2(120) NOT NULL, TRIGGER_NAME VARCHAR2(200) NOT NULL, TRIGGER_GROUP VARCHAR2(200) NOT NULL, BLOB_DATA BLOB NULL, CONSTRAINT QRTZ_BLOB_TRIG_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), CONSTRAINT QRTZ_BLOB_TRIG_TO_TRIG_FK FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
+CREATE TABLE qrtz_calendars ( SCHED_NAME VARCHAR2(120) NOT NULL, CALENDAR_NAME  VARCHAR2(200) NOT NULL, CALENDAR BLOB NOT NULL, CONSTRAINT QRTZ_CALENDARS_PK PRIMARY KEY (SCHED_NAME,CALENDAR_NAME) );
+CREATE TABLE qrtz_paused_trigger_grps ( SCHED_NAME VARCHAR2(120) NOT NULL, TRIGGER_GROUP  VARCHAR2(200) NOT NULL, CONSTRAINT QRTZ_PAUSED_TRIG_GRPS_PK PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP) );
+CREATE TABLE qrtz_fired_triggers ( SCHED_NAME VARCHAR2(120) NOT NULL, ENTRY_ID VARCHAR2(95) NOT NULL, TRIGGER_NAME VARCHAR2(200) NOT NULL, TRIGGER_GROUP VARCHAR2(200) NOT NULL, INSTANCE_NAME VARCHAR2(200) NOT NULL, FIRED_TIME NUMBER(13) NOT NULL, SCHED_TIME NUMBER(13) NOT NULL, PRIORITY NUMBER(13) NOT NULL, STATE VARCHAR2(16) NOT NULL, JOB_NAME VARCHAR2(200) NULL, JOB_GROUP VARCHAR2(200) NULL, IS_NONCONCURRENT VARCHAR2(1) NULL, REQUESTS_RECOVERY VARCHAR2(1) NULL, CONSTRAINT QRTZ_FIRED_TRIGGER_PK PRIMARY KEY (SCHED_NAME,ENTRY_ID) );
+CREATE TABLE qrtz_scheduler_state ( SCHED_NAME VARCHAR2(120) NOT NULL, INSTANCE_NAME VARCHAR2(200) NOT NULL, LAST_CHECKIN_TIME NUMBER(13) NOT NULL, CHECKIN_INTERVAL NUMBER(13) NOT NULL, CONSTRAINT QRTZ_SCHEDULER_STATE_PK PRIMARY KEY (SCHED_NAME,INSTANCE_NAME) );
+CREATE TABLE qrtz_locks ( SCHED_NAME VARCHAR2(120) NOT NULL, LOCK_NAME  VARCHAR2(40) NOT NULL, CONSTRAINT QRTZ_LOCKS_PK PRIMARY KEY (SCHED_NAME,LOCK_NAME) );
+
+create index idx_qrtz_j_req_recovery on qrtz_job_details(SCHED_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_j_grp on qrtz_job_details(SCHED_NAME,JOB_GROUP);
+
+create index idx_qrtz_t_j on qrtz_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_t_jg on qrtz_triggers(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_t_c on qrtz_triggers(SCHED_NAME,CALENDAR_NAME);
+create index idx_qrtz_t_g on qrtz_triggers(SCHED_NAME,TRIGGER_GROUP);
+create index idx_qrtz_t_state on qrtz_triggers(SCHED_NAME,TRIGGER_STATE);
+create index idx_qrtz_t_n_state on qrtz_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_n_g_state on qrtz_triggers(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_next_fire_time on qrtz_triggers(SCHED_NAME,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st on qrtz_triggers(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_misfire on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st_misfire on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
+create index idx_qrtz_t_nft_st_misfire_grp on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
+
+create index idx_qrtz_ft_trig_inst_name on qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME);
+create index idx_qrtz_ft_inst_job_req_rcvry on qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_ft_j_g on qrtz_fired_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_ft_jg on qrtz_fired_triggers(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_ft_t_g on qrtz_fired_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
+create index idx_qrtz_ft_tg on qrtz_fired_triggers(SCHED_NAME,TRIGGER_GROUP);
+
+commit;

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/resources/upgrade/ddl/quartz.postgres.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/quartz.postgres.sql b/ambari-server/src/main/resources/upgrade/ddl/quartz.postgres.sql
new file mode 100644
index 0000000..0d324b9
--- /dev/null
+++ b/ambari-server/src/main/resources/upgrade/ddl/quartz.postgres.sql
@@ -0,0 +1,53 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+--Create quartz tables for schedule manager
+CREATE TABLE ambari.qrtz_job_details ( SCHED_NAME VARCHAR(120) NOT NULL, JOB_NAME  VARCHAR(200) NOT NULL, JOB_GROUP VARCHAR(200) NOT NULL, DESCRIPTION VARCHAR(250) NULL, JOB_CLASS_NAME   VARCHAR(250) NOT NULL, IS_DURABLE BOOL NOT NULL, IS_NONCONCURRENT BOOL NOT NULL, IS_UPDATE_DATA BOOL NOT NULL, REQUESTS_RECOVERY BOOL NOT NULL, JOB_DATA BYTEA NULL, PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) );
+CREATE TABLE ambari.qrtz_triggers ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, JOB_NAME  VARCHAR(200) NOT NULL, JOB_GROUP VARCHAR(200) NOT NULL, DESCRIPTION VARCHAR(250) NULL, NEXT_FIRE_TIME BIGINT NULL, PREV_FIRE_TIME BIGINT NULL, PRIORITY INTEGER NULL, TRIGGER_STATE VARCHAR(16) NOT NULL, TRIGGER_TYPE VARCHAR(8) NOT NULL, START_TIME BIGINT NOT NULL, END_TIME BIGINT NULL, CALENDAR_NAME VARCHAR(200) NULL, MISFIRE_INSTR SMALLINT NULL, JOB_DATA BYTEA NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP) REFERENCES ambari.QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP) );
+CREATE TABLE ambari.qrtz_simple_triggers ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, REPEAT_COUNT BIGINT NOT NULL, REPEAT_INTERVAL BIGINT NOT NULL, TIMES_TRIGGERED BIGINT NOT NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES ambari.QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
+CREATE TABLE ambari.qrtz_cron_triggers ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, CRON_EXPRESSION VARCHAR(120) NOT NULL, TIME_ZONE_ID VARCHAR(80), PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES ambari.QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
+CREATE TABLE ambari.qrtz_simprop_triggers ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, STR_PROP_1 VARCHAR(512) NULL, STR_PROP_2 VARCHAR(512) NULL, STR_PROP_3 VARCHAR(512) NULL, INT_PROP_1 INT NULL, INT_PROP_2 INT NULL, LONG_PROP_1 BIGINT NULL, LONG_PROP_2 BIGINT NULL, DEC_PROP_1 NUMERIC(13,4) NULL, DEC_PROP_2 NUMERIC(13,4) NULL, BOOL_PROP_1 BOOL NULL, BOOL_PROP_2 BOOL NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES ambari.QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
+CREATE TABLE ambari.qrtz_blob_triggers ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, BLOB_DATA BYTEA NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) REFERENCES ambari.QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP) );
+CREATE TABLE ambari.qrtz_calendars ( SCHED_NAME VARCHAR(120) NOT NULL, CALENDAR_NAME  VARCHAR(200) NOT NULL, CALENDAR BYTEA NOT NULL, PRIMARY KEY (SCHED_NAME,CALENDAR_NAME) );
+CREATE TABLE ambari.qrtz_paused_trigger_grps ( SCHED_NAME VARCHAR(120) NOT NULL, TRIGGER_GROUP  VARCHAR(200) NOT NULL, PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP) );
+CREATE TABLE ambari.qrtz_fired_triggers ( SCHED_NAME VARCHAR(120) NOT NULL, ENTRY_ID VARCHAR(95) NOT NULL, TRIGGER_NAME VARCHAR(200) NOT NULL, TRIGGER_GROUP VARCHAR(200) NOT NULL, INSTANCE_NAME VARCHAR(200) NOT NULL, FIRED_TIME BIGINT NOT NULL, SCHED_TIME BIGINT NOT NULL, PRIORITY INTEGER NOT NULL, STATE VARCHAR(16) NOT NULL, JOB_NAME VARCHAR(200) NULL, JOB_GROUP VARCHAR(200) NULL, IS_NONCONCURRENT BOOL NULL, REQUESTS_RECOVERY BOOL NULL, PRIMARY KEY (SCHED_NAME,ENTRY_ID) );
+CREATE TABLE ambari.qrtz_scheduler_state ( SCHED_NAME VARCHAR(120) NOT NULL, INSTANCE_NAME VARCHAR(200) NOT NULL, LAST_CHECKIN_TIME BIGINT NOT NULL, CHECKIN_INTERVAL BIGINT NOT NULL, PRIMARY KEY (SCHED_NAME,INSTANCE_NAME) );
+CREATE TABLE ambari.qrtz_locks (SCHED_NAME VARCHAR(120) NOT NULL, LOCK_NAME  VARCHAR(40) NOT NULL, PRIMARY KEY (SCHED_NAME,LOCK_NAME));
+
+create index idx_qrtz_j_req_recovery on ambari.qrtz_job_details(SCHED_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_j_grp on ambari.qrtz_job_details(SCHED_NAME,JOB_GROUP);
+
+create index idx_qrtz_t_j on ambari.qrtz_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_t_jg on ambari.qrtz_triggers(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_t_c on ambari.qrtz_triggers(SCHED_NAME,CALENDAR_NAME);
+create index idx_qrtz_t_g on ambari.qrtz_triggers(SCHED_NAME,TRIGGER_GROUP);
+create index idx_qrtz_t_state on ambari.qrtz_triggers(SCHED_NAME,TRIGGER_STATE);
+create index idx_qrtz_t_n_state on ambari.qrtz_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_n_g_state on ambari.qrtz_triggers(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_next_fire_time on ambari.qrtz_triggers(SCHED_NAME,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st on ambari.qrtz_triggers(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_misfire on ambari.qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st_misfire on ambari.qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
+create index idx_qrtz_t_nft_st_misfire_grp on ambari.qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
+
+create index idx_qrtz_ft_trig_inst_name on ambari.qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME);
+create index idx_qrtz_ft_inst_job_req_rcvry on ambari.qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_ft_j_g on ambari.qrtz_fired_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_ft_jg on ambari.qrtz_fired_triggers(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_ft_t_g on ambari.qrtz_fired_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
+create index idx_qrtz_ft_tg on ambari.qrtz_fired_triggers(SCHED_NAME,TRIGGER_GROUP);
\ No newline at end of file
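
The three quartz.*.sql scripts above create the standard Quartz 2.x JDBC job-store schema, one file per supported dialect (MySQL uses VARCHAR/BIGINT/BLOB, Oracle uses VARCHAR2/NUMBER/BLOB with named constraints, Postgres uses BOOL/BYTEA under the ambari schema). With the shell-script upgrade path gone, DDL files like these have to be executed from the JVM; per the diffstat this patch ships org.apache.ambari.server.orm.helpers.ScriptRunner for that, whose API is not shown in this mail. A minimal stand-in sketch over plain JDBC, assuming only that each statement in these scripts is terminated by ';' and that the connection settings come from ambari.properties:

// Illustrative sketch only; the committed helper is ScriptRunner, whose
// exact API is not part of this diff. Statement splitting here relies on
// the ';'-terminated layout of the scripts above.
import java.io.BufferedReader;
import java.io.FileReader;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DdlScriptSketch {
  public static void run(String jdbcUrl, String user, String password,
                         String scriptPath) throws Exception {
    try (Connection conn = DriverManager.getConnection(jdbcUrl, user, password);
         BufferedReader in = new BufferedReader(new FileReader(scriptPath))) {
      StringBuilder buf = new StringBuilder();
      String line;
      while ((line = in.readLine()) != null) {
        String trimmed = line.trim();
        if (trimmed.isEmpty() || trimmed.startsWith("--")) {
          continue; // skip blank lines and SQL comments
        }
        buf.append(line).append('\n');
        if (trimmed.endsWith(";")) {
          String sql = buf.toString().trim();
          // drop the trailing ';' since JDBC drivers do not expect it
          sql = sql.substring(0, sql.length() - 1);
          try (Statement st = conn.createStatement()) {
            st.execute(sql);
          }
          buf.setLength(0);
        }
      }
    }
  }
}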

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Oracle-FIX_LOCAL_REPO.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Oracle-FIX_LOCAL_REPO.sql b/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Oracle-FIX_LOCAL_REPO.sql
deleted file mode 100644
index 9e93c70..0000000
--- a/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Oracle-FIX_LOCAL_REPO.sql
+++ /dev/null
@@ -1,45 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements.  See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership.  The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- 'License'); you may not use this file except in compliance
--- with the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an 'AS IS' BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-
--- Remove HDPLocal in the stack
-
-UPDATE ambari.clusters
-  SET desired_stack_version = replace(desired_stack_version, 'HDPLocal', 'HDP')
-  WHERE INSTR(desired_stack_version, 'HDPLocal') > 0;
-
-UPDATE ambari.clusterstate
-  SET current_stack_version = replace(current_stack_version, 'HDPLocal', 'HDP')
-  WHERE INSTR(current_stack_version, 'HDPLocal') > 0;
-
-UPDATE ambari.hostcomponentdesiredstate
-  SET desired_stack_version = replace(desired_stack_version, 'HDPLocal', 'HDP')
-  WHERE INSTR(desired_stack_version, 'HDPLocal') > 0;
-
-UPDATE ambari.hostcomponentstate
-  SET current_stack_version = replace(current_stack_version, 'HDPLocal', 'HDP')
-  WHERE INSTR(current_stack_version, 'HDPLocal') > 0;
-
-UPDATE ambari.servicecomponentdesiredstate
-  SET desired_stack_version = replace(desired_stack_version, 'HDPLocal', 'HDP')
-  WHERE INSTR(desired_stack_version, 'HDPLocal') > 0;
-
-UPDATE ambari.servicedesiredstate
-  SET desired_stack_version = replace(desired_stack_version, 'HDPLocal', 'HDP')
-  WHERE INSTR(desired_stack_version, 'HDPLocal') > 0;
-
-commit;

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Postgres-FIX_LOCAL_REPO.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Postgres-FIX_LOCAL_REPO.sql b/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Postgres-FIX_LOCAL_REPO.sql
deleted file mode 100644
index 180beab..0000000
--- a/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Postgres-FIX_LOCAL_REPO.sql
+++ /dev/null
@@ -1,44 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements.  See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership.  The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-\connect :dbname;
-
-UPDATE ambari.clusters
-  SET desired_stack_version = replace(desired_stack_version, 'HDPLocal', 'HDP')
-  WHERE position('HDPLocal' in desired_stack_version) > 0;
-
-UPDATE ambari.clusterstate
-  SET current_stack_version = replace(current_stack_version, 'HDPLocal', 'HDP')
-  WHERE position('HDPLocal' in current_stack_version) > 0;
-
-UPDATE ambari.hostcomponentdesiredstate
-  SET desired_stack_version = replace(desired_stack_version, 'HDPLocal', 'HDP')
-  WHERE position('HDPLocal' in desired_stack_version) > 0;
-
-UPDATE ambari.hostcomponentstate
-  SET current_stack_version = replace(current_stack_version, 'HDPLocal', 'HDP')
-  WHERE position('HDPLocal' in current_stack_version) > 0;
-
-UPDATE ambari.servicecomponentdesiredstate
-  SET desired_stack_version = replace(desired_stack_version, 'HDPLocal', 'HDP')
-  WHERE position('HDPLocal' in desired_stack_version) > 0;
-
-UPDATE ambari.servicedesiredstate
-  SET desired_stack_version = replace(desired_stack_version, 'HDPLocal', 'HDP')
-  WHERE position('HDPLocal' in desired_stack_version) > 0;
-
-
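
The two FIX_LOCAL_REPO scripts deleted above performed the same data fix twice, once per dialect: rewrite every stack id containing 'HDPLocal' to 'HDP' across six tables. Expressed in code the fix needs to exist only once; per the diffstat it now lives under org.apache.ambari.server.upgrade (StackUpgradeUtil), whose body is not reproduced in this mail. A dialect-neutral sketch over plain JDBC, with the table/column pairs taken from the deleted scripts and everything else illustrative:

// Illustrative sketch; the committed logic is in StackUpgradeUtil and is
// not part of this diff. REPLACE() and LIKE behave the same way on
// Postgres, Oracle and MySQL, so one template covers all three dialects.
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

public class LocalRepoFixSketch {
  private static final String[][] TARGETS = {
      {"clusters", "desired_stack_version"},
      {"clusterstate", "current_stack_version"},
      {"hostcomponentdesiredstate", "desired_stack_version"},
      {"hostcomponentstate", "current_stack_version"},
      {"servicecomponentdesiredstate", "desired_stack_version"},
      {"servicedesiredstate", "desired_stack_version"}
  };

  public static void fixLocalRepo(Connection conn) throws SQLException {
    for (String[] target : TARGETS) {
      String table = target[0];
      String column = target[1];
      String sql = "UPDATE " + table + " SET " + column
          + " = REPLACE(" + column + ", 'HDPLocal', 'HDP')"
          + " WHERE " + column + " LIKE '%HDPLocal%'";
      try (Statement st = conn.createStatement()) {
        st.executeUpdate(sql);
      }
    }
  }
}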

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
new file mode 100644
index 0000000..621c780
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
@@ -0,0 +1,253 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.orm;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import junit.framework.Assert;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.sql.DatabaseMetaData;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+import static org.junit.matchers.JUnitMatchers.containsString;
+
+public class DBAccessorImplTest {
+  private Injector injector;
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  @Before
+  public void setUp() throws Exception {
+    injector = Guice.createInjector(new InMemoryDefaultTestModule());
+  }
+
+  @After
+  public void tearDown() throws Exception {
+
+  }
+
+  private void createMyTable(String tableName) throws Exception {
+    DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
+
+    List<DBColumnInfo> columns = new ArrayList<DBColumnInfo>();
+    columns.add(new DBColumnInfo("id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("name", String.class, 20000, null, true));
+    columns.add(new DBColumnInfo("time", Long.class, null, null, true));
+
+    dbAccessor.createTable(tableName, columns, "id");
+  }
+
+  @Test
+  public void testCreateTable() throws Exception {
+    createMyTable("mytable1");
+    DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
+
+    Statement statement = dbAccessor.getConnection().createStatement();
+    statement.execute("insert into mytable1(id, name) values(1,'hello')");
+
+    ResultSet resultSet = statement.executeQuery("select * from mytable1");
+
+    int count = 0;
+    while (resultSet.next()) {
+      assertEquals(resultSet.getString("name"), "hello");
+      count++;
+    }
+
+    assertEquals(count, 1);
+  }
+
+  @Test
+  public void testAddFKConstraint() throws Exception {
+    createMyTable("mytable2");
+    DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
+
+    List<DBColumnInfo> columns = new ArrayList<DBColumnInfo>();
+    columns.add(new DBColumnInfo("fid", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("fname", String.class, null, null, false));
+
+    dbAccessor.createTable("foreigntable", columns, "fid");
+
+    dbAccessor.addFKConstraint("foreigntable", "MYFKCONSTRAINT", "fid",
+      "mytable2", "id", false);
+
+    Statement statement = dbAccessor.getConnection().createStatement();
+    statement.execute("insert into mytable2(id, name) values(1,'hello')");
+    statement.execute("insert into foreigntable(fid, fname) values(1,'howdy')");
+
+    ResultSet resultSet = statement.executeQuery("select * from foreigntable");
+
+    int count = 0;
+    while (resultSet.next()) {
+      assertEquals(resultSet.getString("fname"), "howdy");
+      count++;
+    }
+    resultSet.close();
+    assertEquals(count, 1);
+
+    exception.expect(SQLException.class);
+    exception.expectMessage(containsString("MYFKCONSTRAINT"));
+    dbAccessor.dropTable("mytable2");
+  }
+
+  @Test
+  public void testAddColumn() throws Exception {
+    createMyTable("mytable3");
+    DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
+
+    DBColumnInfo dbColumnInfo = new DBColumnInfo("description", String.class,
+      null, null, true);
+
+    dbAccessor.addColumn("mytable3", dbColumnInfo);
+
+    Statement statement = dbAccessor.getConnection().createStatement();
+    statement.execute("update mytable3 set description = 'blah' where id = 1");
+
+    ResultSet resultSet = statement.executeQuery("select description from mytable3");
+
+    while (resultSet.next()) {
+      assertEquals(resultSet.getString("description"), "blah");
+    }
+    resultSet.close();
+  }
+
+  @Test
+  public void testUpdateTable() throws Exception {
+    createMyTable("mytable4");
+    DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
+
+    dbAccessor.updateTable("mytable4", "name", "blah", "where id = 1");
+
+    Statement statement = dbAccessor.getConnection().createStatement();
+    ResultSet resultSet = statement.executeQuery("select name from mytable4");
+
+    while (resultSet.next()) {
+      assertEquals(resultSet.getString("name"), "blah");
+    }
+    resultSet.close();
+  }
+
+  @Test
+  public void testRenameColumn() throws Exception {
+    createMyTable("mytable6");
+    DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
+
+    dbAccessor.executeQuery("insert into mytable6(id, name, time) values(1, 'Bob', 1234567)");
+
+    dbAccessor.renameColumn("mytable6", "time", new DBColumnInfo("new_time", Long.class, 0, null, true));
+
+    Statement statement = dbAccessor.getConnection().createStatement();
+    ResultSet resultSet = statement.executeQuery("select new_time from mytable6 where id=1");
+    int count = 0;
+    while (resultSet.next()) {
+      count++;
+      long newTime = resultSet.getLong("new_time");
+      assertEquals(newTime, 1234567L);
+    }
+
+    assertEquals(count, 1);
+  }
+
+  @Test
+  public void testModifyColumn() throws Exception {
+    createMyTable("mytable7");
+    DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
+
+    dbAccessor.executeQuery("insert into mytable7(id, name, time) values(1, 'Bob', 1234567)");
+
+    dbAccessor.alterColumn("mytable7", new DBColumnInfo("name", String.class, 25000));
+
+  }
+
+  @Test
+  public void testAddColumnWithDefault() throws Exception {
+    createMyTable("mytable8");
+    DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
+
+    dbAccessor.executeQuery("insert into mytable8(id, name, time) values(1, 'Bob', 1234567)");
+
+    dbAccessor.addColumn("mytable8", new DBColumnInfo("test", String.class, 1000, "test", false));
+
+    Statement statement = dbAccessor.getConnection().createStatement();
+    ResultSet resultSet = statement.executeQuery("select * from mytable8");
+    int count = 0;
+    while (resultSet.next()) {
+      assertEquals(resultSet.getString("test"), "test");
+      count++;
+    }
+
+    assertEquals(count, 1);
+
+  }
+
+  @Ignore // Does not work with the Derby DB driver
+  @Test
+  public void testTableHasFKConstraint() throws Exception {
+    createMyTable("mytable5");
+
+    DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
+
+    List<DBColumnInfo> columns = new ArrayList<DBColumnInfo>();
+    columns.add(new DBColumnInfo("fid", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("fname", String.class, null, null, false));
+
+    dbAccessor.createTable("foreigntable5", columns, "fid");
+
+    Statement statement = dbAccessor.getConnection().createStatement();
+    statement.execute("ALTER TABLE foreigntable5 ADD CONSTRAINT FK_test FOREIGN KEY (fid) REFERENCES mytable5 (id)");
+
+    Assert.assertTrue(dbAccessor.tableHasForeignKey("foreigntable5",
+      "mytable5", "fid", "id"));
+  }
+
+  @Test
+  public void testTableExists() throws Exception {
+    DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
+
+    Statement statement = dbAccessor.getConnection().createStatement();
+    statement.execute("Create table testTable (id VARCHAR(255))");
+
+    Assert.assertTrue(dbAccessor.tableExists("testTable"));
+  }
+
+  @Test
+  public void testColumnExists() throws Exception {
+    createMyTable("mytable6");
+
+    DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
+
+    Assert.assertTrue(dbAccessor.tableHasColumn("mytable6", "time"));
+  }
+
+}
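
Besides covering DBAccessorImpl against the in-memory Derby database, the test above is the best reference in this patch for the new DBAccessor surface: createTable, addColumn, addFKConstraint, renameColumn, alterColumn, updateTable, dropTable, tableExists, tableHasColumn and tableHasForeignKey. A sketch of how an upgrade step might combine the existence checks with the DDL calls; the "example" table and the idempotency guards are illustrative, and the SQLException signature is assumed from the test code rather than taken from the interface:

// Illustrative sketch built from the DBAccessor calls exercised above;
// the table name and guard pattern are not from the patch.
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

import org.apache.ambari.server.orm.DBAccessor;
import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;

public class ExampleDdlStep {
  private final DBAccessor dbAccessor;

  public ExampleDdlStep(DBAccessor dbAccessor) {
    this.dbAccessor = dbAccessor;
  }

  public void execute() throws SQLException {
    if (!dbAccessor.tableExists("example")) {
      List<DBColumnInfo> columns = new ArrayList<DBColumnInfo>();
      columns.add(new DBColumnInfo("id", Long.class, null, null, false));
      columns.add(new DBColumnInfo("name", String.class, 255, null, true));
      dbAccessor.createTable("example", columns, "id");
    }
    if (!dbAccessor.tableHasColumn("example", "description")) {
      // nullable column with no default: safe to add to a populated table
      dbAccessor.addColumn("example",
          new DBColumnInfo("description", String.class, 512, null, true));
    }
  }
}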

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalogTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalogTest.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalogTest.java
new file mode 100644
index 0000000..6b4bbc0
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalogTest.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.upgrade;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.multibindings.Multibinder;
+import com.google.inject.persist.PersistService;
+import junit.framework.Assert;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Set;
+
+public class UpgradeCatalogTest {
+  private Injector injector;
+  private AmbariMetaInfo metaInfo;
+
+  private static class UpgradeCatalog149 extends AbstractUpgradeCatalog {
+    public UpgradeCatalog149(Injector injector) {
+      super(injector);
+    }
+
+    @Override
+    public void executeDDLUpdates() throws AmbariException, SQLException {
+      // do nothing: only for path testing
+    }
+
+    @Override
+    public void executeDMLUpdates() throws AmbariException, SQLException {
+
+    }
+
+    @Override
+    public String getTargetVersion() {
+      return "1.4.9";
+    }
+  }
+
+  private static class UpgradeHelperModuleTest extends InMemoryDefaultTestModule {
+    @Override
+    protected void configure() {
+      super.configure();
+
+      // Add binding to each newly created catalog
+      Multibinder<UpgradeCatalog> catalogBinder =
+        Multibinder.newSetBinder(binder(), UpgradeCatalog.class);
+      catalogBinder.addBinding().to(UpgradeCatalog150.class);
+      catalogBinder.addBinding().to(UpgradeCatalog149.class);
+    }
+  }
+
+  @Before
+  public void setup() throws Exception {
+    injector  = Guice.createInjector(new UpgradeHelperModuleTest());
+    injector.getInstance(GuiceJpaInitializer.class);
+    metaInfo = injector.getInstance(AmbariMetaInfo.class);
+  }
+
+  @After
+  public void teardown() throws AmbariException {
+    injector.getInstance(PersistService.class).stop();
+  }
+
+  @Test
+  public void testUpgradePath() throws Exception {
+    SchemaUpgradeHelper schemaUpgradeHelper = injector.getInstance
+      (SchemaUpgradeHelper.class);
+
+    Set<UpgradeCatalog> upgradeCatalogSet = schemaUpgradeHelper.getAllUpgradeCatalogs();
+
+    Assert.assertNotNull(upgradeCatalogSet);
+    Assert.assertEquals(2, upgradeCatalogSet.size());
+
+    List<UpgradeCatalog> upgradeCatalogs =
+      schemaUpgradeHelper.getUpgradePath(null, "1.5.1");
+
+    Assert.assertNotNull(upgradeCatalogs);
+    Assert.assertEquals(2, upgradeCatalogs.size());
+    Assert.assertEquals("1.4.9", upgradeCatalogs.get(0).getTargetVersion());
+    Assert.assertEquals("1.5.0", upgradeCatalogs.get(1).getTargetVersion());
+  }
+}
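
UpgradeCatalogTest pins down the upgrade-path contract: every catalog is contributed to a Guice Multibinder set, and SchemaUpgradeHelper.getUpgradePath(sourceVersion, targetVersion) returns the applicable catalogs ordered by target version (1.4.9 before 1.5.0 above, even though the bindings are registered in the opposite order). Registering a future catalog would follow the same pattern; a sketch, where UpgradeCatalog151 and the module name are hypothetical (in the shipped server the binding plausibly belongs in ControllerModule, which this patch also touches):

// Sketch of contributing a hypothetical UpgradeCatalog151, mirroring the
// test module above; the module class itself is illustrative.
package org.apache.ambari.server.upgrade;

import com.google.inject.AbstractModule;
import com.google.inject.multibindings.Multibinder;

public class UpgradeCatalogBindings extends AbstractModule {
  @Override
  protected void configure() {
    Multibinder<UpgradeCatalog> catalogBinder =
        Multibinder.newSetBinder(binder(), UpgradeCatalog.class);
    catalogBinder.addBinding().to(UpgradeCatalog150.class);
    catalogBinder.addBinding().to(UpgradeCatalog151.class); // hypothetical
  }
}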

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/test/python/TestAmbariServer.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index 3075391..4653f27 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -28,6 +28,7 @@ import signal
 import stat
 import datetime
 import operator
+import json
 from pwd import getpwnam
 from ambari_server.resourceFilesKeeper import ResourceFilesKeeper, KeeperException
 
@@ -2715,16 +2716,11 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
     self.assertTrue(removeMock.called)
 
 
-  @patch.object(ambari_server, "configure_database_username_password")
-  @patch.object(ambari_server, "run_os_command")
   @patch.object(ambari_server, "is_root")
   @patch.object(ambari_server, "check_database_name_property")
-  @patch.object(ambari_server, "parse_properties_file")
-  @patch.object(ambari_server, "get_db_cli_tool")
-  @patch.object(ambari_server, "remote_stack_upgrade")
-  def test_upgrade_stack(self, remote_stack_upgrade_mock, get_db_cli_tool_mock, parse_properties_file_mock,
-                         check_database_name_property_mock, is_root_mock, run_os_command_mock,
-                         configure_postgres_username_password_mock):
+  @patch.object(ambari_server, "run_stack_upgrade")
+  def test_upgrade_stack(self, run_stack_upgrade_mock,
+                         check_database_name_property_mock, is_root_mock):
     args = MagicMock()
     args.persistence_type = "local"
 
@@ -2740,40 +2736,78 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
 
     # Testing calls under root
     is_root_mock.return_value = True
-    run_os_command_mock.return_value = (0, '', '')
-    check_database_name_property_mock.return_value = 1
+    run_stack_upgrade_mock.return_value = 0
     ambari_server.upgrade_stack(args, 'HDP-2.0')
 
-    self.assertTrue(configure_postgres_username_password_mock.called)
+    self.assertTrue(run_stack_upgrade_mock.called)
+    run_stack_upgrade_mock.assert_called_with("HDP", "2.0")
+
+  @patch.object(ambari_server, 'get_conf_dir')
+  @patch.object(ambari_server, 'get_ambari_classpath')
+  @patch.object(ambari_server, 'run_os_command')
+  @patch.object(ambari_server, 'find_jdk')
+  def test_run_stack_upgrade(self, jdk_path_mock, run_os_command_mock,
+                             get_ambari_classpath_mock, get_conf_dir_mock):
+    jdk_path_mock.return_value = "/usr/lib/java"
+    run_os_command_mock.return_value = (0, None, None)
+    get_ambari_classpath_mock.return_value = 'test:path12'
+    get_conf_dir_mock.return_value = '/etc/conf'
+    stackIdMap = {'HDP' : '2.0'}
+
+    ambari_server.run_stack_upgrade('HDP', '2.0')
+
+    self.assertTrue(jdk_path_mock.called)
+    self.assertTrue(get_ambari_classpath_mock.called)
+    self.assertTrue(get_conf_dir_mock.called)
     self.assertTrue(run_os_command_mock.called)
+    run_os_command_mock.assert_called_with('/usr/lib/java/bin/java -cp '
+        'test:path12:/etc/conf org.apache.ambari.server.upgrade.StackUpgradeHelper '
+        'updateStackId ' + json.dumps(stackIdMap))
 
-    # Test remote oracle/mysql
-    configure_postgres_username_password_mock.reset_mock()
-    run_os_command_mock.reset_mock()
-    args.persistence_type = "remote"
-    args.database = "oracle"
 
-    get_db_cli_tool_mock.return_value = "psql"
-    remote_stack_upgrade_mock.return_value = (0, "test", "test")
+  @patch.object(ambari_server, 'get_conf_dir')
+  @patch.object(ambari_server, 'get_ambari_classpath')
+  @patch.object(ambari_server, 'run_os_command')
+  @patch.object(ambari_server, 'find_jdk')
+  def test_run_schema_upgrade(self, jdk_path_mock, run_os_command_mock,
+                              get_ambari_classpath_mock, get_conf_dir_mock):
+    jdk_path_mock.return_value = "/usr/lib/java"
+    run_os_command_mock.return_value = (0, None, None)
+    get_ambari_classpath_mock.return_value = 'test:path12'
+    get_conf_dir_mock.return_value = '/etc/conf'
 
-    ambari_server.upgrade_stack(args, 'HDP-2.0')
+    ambari_server.run_schema_upgrade('1.4.9.40')
 
-    self.assertTrue(get_db_cli_tool_mock.called)
-    self.assertTrue(remote_stack_upgrade_mock.called)
-    self.assertFalse(run_os_command_mock.called)
+    self.assertTrue(jdk_path_mock.called)
+    self.assertTrue(get_ambari_classpath_mock.called)
+    self.assertTrue(get_conf_dir_mock.called)
+    self.assertTrue(run_os_command_mock.called)
+    run_os_command_mock.assert_called_with('/usr/lib/java/bin/java -cp '
+        'test:path12:/etc/conf org.apache.ambari.server.upgrade.SchemaUpgradeHelper '
+        '1.4.9.40')
 
-    get_db_cli_tool_mock.reset_mock()
-    remote_stack_upgrade_mock.reset_mock()
-    run_os_command_mock.reset_mock()
 
-    args.database = "mysql"
-    get_db_cli_tool_mock.return_value = "mysql"
-    remote_stack_upgrade_mock.return_value = (0, "test_mysql_stack_upgrade", "test_mysql_stack_upgrade")
-    ambari_server.upgrade_stack(args, 'HDP-2.0')
+  @patch.object(ambari_server, 'get_conf_dir')
+  @patch.object(ambari_server, 'get_ambari_classpath')
+  @patch.object(ambari_server, 'run_os_command')
+  @patch.object(ambari_server, 'find_jdk')
+  def test_run_metainfo_upgrade(self, jdk_path_mock, run_os_command_mock,
+                                get_ambari_classpath_mock, get_conf_dir_mock):
+    jdk_path_mock.return_value = "/usr/lib/java"
+    run_os_command_mock.return_value = (0, None, None)
+    get_ambari_classpath_mock.return_value = 'test:path12'
+    get_conf_dir_mock.return_value = '/etc/conf'
 
-    self.assertTrue(get_db_cli_tool_mock.called)
-    self.assertTrue(remote_stack_upgrade_mock.called)
-    self.assertFalse(run_os_command_mock.called)
+    json_map = {'a': 'http://newurl'}
+    ambari_server.run_metainfo_upgrade(json_map)
+
+    self.assertTrue(jdk_path_mock.called)
+    self.assertTrue(get_ambari_classpath_mock.called)
+    self.assertTrue(get_conf_dir_mock.called)
+    self.assertTrue(run_os_command_mock.called)
+    run_os_command_mock.assert_called_with('/usr/lib/java/bin/java -cp '
+        'test:path12:/etc/conf org.apache.ambari.server.upgrade.StackUpgradeHelper '
+        'updateMetaInfo ' + json.dumps(json_map))
 
 
   @patch("__builtin__.open")
@@ -2782,35 +2816,25 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
   @patch.object(ambari_server, "adjust_directory_permissions")
   @patch.object(ambari_server, "print_warning_msg")
   @patch.object(ambari_server, "read_ambari_user")
-  @patch.object(ambari_server, "check_db_consistency")
-  @patch.object(ambari_server, "execute_db_script")
-  @patch.object(ambari_server, "check_postgre_up")
+  @patch.object(ambari_server, "run_schema_upgrade")
   @patch.object(ambari_server, "update_ambari_properties")
   @patch.object(ambari_server, "parse_properties_file")
   @patch.object(ambari_server, "is_root")
   @patch.object(ambari_server, "get_ambari_properties")
-  @patch.object(ambari_server, "get_db_cli_tool")
-  @patch.object(ambari_server, "execute_remote_script")
   @patch.object(ambari_server, "upgrade_local_repo")
-  def test_upgrade(self, upgrade_local_repo_mock, execute_remote_script_mock,
-                   get_db_cli_tool_mock, get_ambari_properties_mock, is_root_mock,
+  def test_upgrade(self, upgrade_local_repo_mock,
+                   get_ambari_properties_mock, is_root_mock,
                    parse_properties_file_mock,
-                   update_ambari_properties_mock,
-                   check_postgre_up_mock, execute_db_script_mock,
-                   check_db_consistency_mock, read_ambari_user_mock,
-                   print_warning_msg_mock, adjust_directory_permissions_mock,
+                   update_ambari_properties_mock, run_schema_upgrade_mock,
+                   read_ambari_user_mock, print_warning_msg_mock,
+                   adjust_directory_permissions_mock,
                    find_properties_file_mock, properties_store_mock, open_mock):
 
     args = MagicMock()
     check_database_name_property_mock = MagicMock()
 
-    args.upgrade_script_file = "/var/lib/" \
-                               "ambari-server/resources/upgrade/ddl/" \
-                               "Ambari-DDL-Postgres-UPGRADE-1.3.0.sql"
     update_ambari_properties_mock.return_value = 0
-    check_postgre_up_mock.return_value = 0
-    execute_db_script_mock.return_value = 0
-    check_db_consistency_mock.return_value = 0
+    run_schema_upgrade_mock.return_value = 0
 
     # Testing call under non-root
     is_root_mock.return_value = False
@@ -2827,6 +2851,7 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
 
     # Testing with undefined custom user
     read_ambari_user_mock.return_value = None
+    run_schema_upgrade_mock.return_value = 0
     ambari_server.upgrade(args)
     self.assertTrue(print_warning_msg_mock.called)
     warning_args = print_warning_msg_mock.call_args[0][0]
@@ -2838,94 +2863,17 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
     ambari_server.upgrade(args)
     self.assertTrue(adjust_directory_permissions_mock.called)
 
-    # Test if check_database_name_property raise exception, added default
-    # JDBC_DATABASE_PROPERTY and upgrade process doesn't fails
-    def effect():
-      raise FatalException()
-
     properties = ambari_server.Properties()
     get_ambari_properties_mock.return_value = properties
-
-    check_database_name_property_mock.side_effect = effect
+    run_schema_upgrade_mock.return_value = 0
     parse_properties_file_mock.called = False
     retcode = ambari_server.upgrade(args)
     self.assertTrue(get_ambari_properties_mock.called)
 
-    self.assertTrue(properties.get_property(ambari_server.JDBC_DATABASE_PROPERTY))
     self.assertNotEqual(-1, retcode)
     self.assertTrue(parse_properties_file_mock.called)
+    self.assertTrue(run_schema_upgrade_mock.called)
 
-    #Test remote upgrade
-    get_db_cli_tool_mock.return_value = "psql"
-    execute_remote_script_mock.return_value = (0, "test", "test")
-    args.persistence_type = "remote"
-    args.database = "oracle"
-
-    ambari_server.upgrade(args)
-
-    self.assertTrue(get_db_cli_tool_mock.called)
-    self.assertTrue(execute_remote_script_mock.called)
-
-
-  def test_prepare_schema_upgrade_command(self):
-    test_command = "sqlplus -S -L " \
-                   "'user/pass@(description=(address=(protocol=TCP)(host=oraclehost)(port=1521))(connect_data=(sid=db_SID)))' " \
-                   "@/var/lib/ambari-server/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql user"
-    args = MagicMock()
-    args.database="oracle"
-    args.database_username="user"
-    args.database_password="pass"
-    args.database_host="oraclehost"
-    args.sid_or_sname="sid"
-    args.jdbc_url="fake"
-    args.database_port="1521"
-    args.database_name="db_SID"
-
-    command = ambari_server.prepare_schema_upgrade_command(args)
-
-    self.assertEqual(command, test_command)
-
-    pass
-
-  def test_prepare_local_repo_upgrade_commands(self):
-    test_commands = [
-      "sqlplus -S -L 'user/pass@(description=(address=(protocol=TCP)(host=oraclehost)(port=1521))(connect_data=(sid=db_SID)))' @/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Oracle-INSERT_METAINFO.sql key value",
-      "sqlplus -S -L 'user/pass@(description=(address=(protocol=TCP)(host=oraclehost)(port=1521))(connect_data=(sid=db_SID)))' @/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Oracle-FIX_LOCAL_REPO.sql"
-    ]
-    args = MagicMock()
-    args.database="oracle"
-    args.database_username="user"
-    args.database_password="pass"
-    args.database_host="oraclehost"
-    args.sid_or_sname="sid"
-    args.jdbc_url="fake"
-    args.database_port="1521"
-    args.database_name="db_SID"
-
-    commands = ambari_server.prepare_local_repo_upgrade_commands(args, "key", "value")
-
-    self.assertEqual(commands, test_commands)
-
-    pass
-
-  def test_prepare_stack_upgrade_command(self):
-    test_command = "sqlplus -S -L 'user/pass@(description=(address=(protocol=TCP)(host=oraclehost)(port=1521))(connect_data=(sid=db_SID)))' " \
-                   "@/var/lib/ambari-server/resources/upgrade/dml/Ambari-DML-Oracle-UPGRADE_STACK.sql HDP 2.1.1"
-    args = MagicMock()
-    args.database="oracle"
-    args.database_username="user"
-    args.database_password="pass"
-    args.database_host="oraclehost"
-    args.sid_or_sname="sid"
-    args.jdbc_url="fake"
-    args.database_port="1521"
-    args.database_name="db_SID"
-
-    command = ambari_server.prepare_stack_upgrade_command(args, "HDP-2.1.1")
-
-    self.assertEqual(command, test_command)
-
-    pass
 
   def test_print_info_msg(self):
     out = StringIO.StringIO()
@@ -4628,13 +4576,13 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
   @patch("os.path.exists")
   @patch.object(ambari_server, "load_stack_values")
   @patch.object(ambari_server, "get_ambari_properties")
-  @patch.object(ambari_server, "upgrade_local_repo_db")
+  @patch.object(ambari_server, "run_metainfo_upgrade")
   def test_upgrade_local_repo(self,
-                         upgrade_local_repo_db_mock,
-                         get_ambari_properties_mock,
-                         load_stack_values_mock,
-                         os_path_exists_mock,
-                         os_listdir_mock):
+                           run_metainfo_upgrade_mock,
+                           get_ambari_properties_mock,
+                           load_stack_values_mock,
+                           os_path_exists_mock,
+                           os_listdir_mock):
 
     from mock.mock import call
     args = MagicMock()
@@ -4658,15 +4606,16 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
 
     self.assertTrue(get_ambari_properties_mock.called)
     self.assertTrue(load_stack_values_mock.called)
-    self.assertTrue(upgrade_local_repo_db_mock.called)
+    self.assertTrue(run_metainfo_upgrade_mock.called)
+    run_metainfo_upgrade_mock.assert_called_with({'a': 'http://newurl'})
 
   @patch("os.listdir")
   @patch("os.path.exists")
   @patch.object(ambari_server, "load_stack_values")
   @patch.object(ambari_server, "get_ambari_properties")
-  @patch.object(ambari_server, "upgrade_local_repo_db")
+  @patch.object(ambari_server, "run_metainfo_upgrade")
   def test_upgrade_local_repo_nochange(self,
-                         upgrade_local_repo_db_mock,
+                         run_metainfo_upgrade_mock,
                          get_ambari_properties_mock,
                          load_stack_values_mock,
                          os_path_exists_mock,
@@ -4692,6 +4641,7 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
 
     self.assertTrue(get_ambari_properties_mock.called)
     self.assertTrue(load_stack_values_mock.called)
-    self.assertFalse(upgrade_local_repo_db_mock.called)
+    self.assertTrue(run_metainfo_upgrade_mock.called)
+    run_metainfo_upgrade_mock.assert_called_with({})
 
 


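Taken together, the Python-side changes above replace the psql/sqlplus shell-outs and per-dialect SQL resources with a single JVM invocation per operation. The exact command lines are asserted verbatim in the tests; with the mocked JDK, classpath and conf dir replaced by placeholders they reduce to:

  <jdk>/bin/java -cp <ambari-classpath>:<conf-dir> org.apache.ambari.server.upgrade.SchemaUpgradeHelper <target-version>
  <jdk>/bin/java -cp <ambari-classpath>:<conf-dir> org.apache.ambari.server.upgrade.StackUpgradeHelper updateStackId <json-map>
  <jdk>/bin/java -cp <ambari-classpath>:<conf-dir> org.apache.ambari.server.upgrade.StackUpgradeHelper updateMetaInfo <json-map>

The placeholders are not real install paths; they stand for whatever find_jdk, get_ambari_classpath and get_conf_dir return at runtime.
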
[4/4] git commit: AMBARI-4716. Run Ambari Server Upgrade via code rather than DDL/DML. (mpapirkovskyy)

Posted by mp...@apache.org.
AMBARI-4716. Run Ambari Server Upgrade via code rather than DDL/DML. (mpapirkovskyy)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fea7b622
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fea7b622
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fea7b622

Branch: refs/heads/trunk
Commit: fea7b6222311a48604c5a96d9478c53fbd7ce26f
Parents: c02c7bf
Author: Myroslav Papirkovskyy <mp...@hortonworks.com>
Authored: Fri Feb 21 21:02:27 2014 +0200
Committer: Myroslav Papirkovskyy <mp...@hortonworks.com>
Committed: Fri Feb 21 21:03:40 2014 +0200

----------------------------------------------------------------------
 ambari-server/pom.xml                           |  19 +-
 .../server/configuration/Configuration.java     |  20 +-
 .../server/controller/ControllerModule.java     |   5 +-
 .../apache/ambari/server/orm/DBAccessor.java    | 319 ++++++++++++
 .../ambari/server/orm/DBAccessorImpl.java       | 455 +++++++++++++++++
 .../orm/dao/HostComponentDesiredStateDAO.java   |  13 +
 .../server/orm/dao/HostComponentStateDAO.java   |  13 +
 .../dao/ServiceComponentDesiredStateDAO.java    |  14 +
 .../server/orm/dao/ServiceDesiredStateDAO.java  |  14 +
 .../ambari/server/orm/helpers/ScriptRunner.java | 254 ++++++++++
 .../server/orm/helpers/dbms/DbmsHelper.java     |  72 +++
 .../server/orm/helpers/dbms/DerbyHelper.java    |  52 ++
 .../orm/helpers/dbms/GenericDbmsHelper.java     | 273 +++++++++++
 .../server/orm/helpers/dbms/MySqlHelper.java    |  49 ++
 .../server/orm/helpers/dbms/OracleHelper.java   |  46 ++
 .../server/orm/helpers/dbms/PostgresHelper.java |  46 ++
 .../server/upgrade/AbstractUpgradeCatalog.java  | 192 ++++++++
 .../server/upgrade/SchemaUpgradeHelper.java     | 211 ++++++++
 .../server/upgrade/StackUpgradeHelper.java      | 159 ++++++
 .../ambari/server/upgrade/StackUpgradeUtil.java | 142 ++++++
 .../ambari/server/upgrade/UpgradeCatalog.java   |  55 +++
 .../server/upgrade/UpgradeCatalog150.java       | 487 +++++++++++++++++++
 .../apache/ambari/server/utils/DateUtils.java   |  13 +
 .../ambari/server/utils/VersionUtils.java       |  14 +
 ambari-server/src/main/python/ambari-server.py  | 469 ++++--------------
 .../upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql    |  95 ----
 .../upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql   | 134 -----
 .../ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql   | 250 ----------
 .../main/resources/upgrade/ddl/quartz.mysql.sql |  53 ++
 .../resources/upgrade/ddl/quartz.oracle.sql     |  55 +++
 .../resources/upgrade/ddl/quartz.postgres.sql   |  53 ++
 .../dml/Ambari-DML-Oracle-FIX_LOCAL_REPO.sql    |  45 --
 .../dml/Ambari-DML-Postgres-FIX_LOCAL_REPO.sql  |  44 --
 .../ambari/server/orm/DBAccessorImplTest.java   | 253 ++++++++++
 .../server/upgrade/UpgradeCatalogTest.java      | 105 ++++
 .../src/test/python/TestAmbariServer.py         | 226 ++++-----
 ambari-server/src/test/python/ambari.properties |   1 +
 37 files changed, 3617 insertions(+), 1103 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index bc83dd8..0da58d1 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -350,19 +350,7 @@
               <directory>/var/lib/ambari-server/resources/upgrade/ddl</directory>
               <sources>
                 <source>
-                  <location>target/classes/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql</location>
-                </source>
-                <source>
-                  <location>src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.Fix.sql</location>
-                </source>
-                <source>
-                  <location>src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.Check.sql</location>
-                </source>
-                <source>
-                  <location>target/classes/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql</location>
-                </source>
-                <source>
-                  <location>target/classes/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql</location>
+                  <location>src/main/resources/upgrade/ddl</location>
                 </source>
               </sources>
             </mapping>
@@ -618,6 +606,11 @@
       <artifactId>guice-servlet</artifactId>
     </dependency>
     <dependency>
+      <groupId>com.google.inject.extensions</groupId>
+      <artifactId>guice-multibindings</artifactId>
+      <version>3.0</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.derby</groupId>
       <artifactId>derby</artifactId>
     </dependency>

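The new guice-multibindings dependency is what lets the upgrade code collect every
UpgradeCatalog behind a single injected Set. A minimal sketch of that wiring; the
module and binding site here are illustrative, not quoted from this patch:

    import com.google.inject.AbstractModule;
    import com.google.inject.multibindings.Multibinder;

    public class UpgradeModule extends AbstractModule {
      @Override
      protected void configure() {
        // Each addBinding() contributes one catalog to an injectable Set<UpgradeCatalog>
        Multibinder<UpgradeCatalog> catalogBinder =
            Multibinder.newSetBinder(binder(), UpgradeCatalog.class);
        catalogBinder.addBinding().to(UpgradeCatalog150.class);
      }
    }
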
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 9fe104c..56bb58c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -97,6 +97,7 @@ public class Configuration {
   public static final String POSTGRES_DB_NAME = "postgres";
   public static final String ORACLE_DB_NAME = "oracle";
   public static final String MYSQL_DB_NAME = "mysql";
+  public static final String DERBY_DB_NAME = "derby";
   public static final String OJDBC_JAR_NAME_KEY = "db.oracle.jdbc.name";
   public static final String OJDBC_JAR_NAME_DEFAULT = "ojdbc6.jar";
   public static final String MYSQL_JAR_NAME_KEY = "db.mysql.jdbc.name";
@@ -224,7 +225,7 @@ public class Configuration {
       "/var/lib/ambari-server/resources/custom_action_definitions";
 
   private static final long SERVER_EC_CACHE_SIZE_DEFAULT = 10000L;
-  private static final String SERVER_JDBC_USER_NAME_DEFAULT = "ambari-server";
+  private static final String SERVER_JDBC_USER_NAME_DEFAULT = "ambari";
   private static final String SERVER_JDBC_USER_PASSWD_DEFAULT = "bigdata";
   private static final String SERVER_JDBC_RCA_USER_NAME_DEFAULT = "mapred";
   private static final String SERVER_JDBC_RCA_USER_PASSWD_DEFAULT = "mapred";
@@ -235,7 +236,7 @@ public class Configuration {
   private static final String SRVR_CRT_PASS_LEN_DEFAULT = "50";
   private static final String PASSPHRASE_ENV_DEFAULT = "AMBARI_PASSPHRASE";
   private static final String RESOURCES_DIR_DEFAULT =
-      "/var/share/ambari/resources/";
+      "/var/lib/ambari-server/resources/";
   private static final String ANONYMOUS_AUDIT_NAME_KEY = "anonymous.audit.name";
   private static final String CLIENT_SECURITY_DEFAULT = "local";
   private static final int CLIENT_API_PORT_DEFAULT = 8080;
@@ -601,11 +602,19 @@ public class Configuration {
   }
 
   public String getDatabaseDriver() {
-    return properties.getProperty(SERVER_JDBC_DRIVER_KEY, JDBC_LOCAL_DRIVER);
+    if (getPersistenceType() != PersistenceType.IN_MEMORY) {
+      return properties.getProperty(SERVER_JDBC_DRIVER_KEY, JDBC_LOCAL_DRIVER);
+    } else {
+      return JDBC_IN_MEMROY_DRIVER;
+    }
   }
 
   public String getDatabaseUrl() {
-    return properties.getProperty(SERVER_JDBC_URL_KEY, getLocalDatabaseUrl());
+    if (getPersistenceType() != PersistenceType.IN_MEMORY) {
+      return properties.getProperty(SERVER_JDBC_URL_KEY, getLocalDatabaseUrl());
+    } else {
+      return JDBC_IN_MEMORY_URL;
+    }
   }
 
   public String getLocalDatabaseUrl() {
@@ -929,4 +938,7 @@ public class Configuration {
     }
   }
 
+  public String getResourceDirPath() {
+    return properties.getProperty(RESOURCES_DIR_KEY, RESOURCES_DIR_DEFAULT);
+  }
 }

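The practical effect of the two accessor changes: once the persistence type is
IN_MEMORY, any configured JDBC driver/url is ignored in favour of the in-memory
constants (JDBC_IN_MEMROY_DRIVER / JDBC_IN_MEMORY_URL). A sketch; the property
key, value, and constructor are assumptions rather than quotes from this hunk:

    java.util.Properties props = new java.util.Properties();
    props.setProperty("server.persistence.type", "in-memory"); // assumed key/value
    Configuration conf = new Configuration(props);              // assumed constructor
    System.out.println(conf.getDatabaseDriver()); // in-memory Derby driver constant
    System.out.println(conf.getDatabaseUrl());    // in-memory Derby URL constant
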
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
index adb78c3..ae57af2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
@@ -37,6 +37,7 @@ import org.apache.ambari.server.controller.internal.HostComponentResourceProvide
 import org.apache.ambari.server.controller.internal.HostResourceProvider;
 import org.apache.ambari.server.controller.internal.ServiceResourceProvider;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
+import org.apache.ambari.server.orm.DBAccessorImpl;
 import org.apache.ambari.server.orm.PersistenceType;
 import org.apache.ambari.server.scheduler.ExecutionScheduler;
 import org.apache.ambari.server.scheduler.ExecutionSchedulerImpl;
@@ -70,13 +71,12 @@ import org.apache.ambari.server.state.scheduler.RequestExecutionImpl;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostImpl;
 import org.springframework.security.crypto.password.PasswordEncoder;
 import org.springframework.security.crypto.password.StandardPasswordEncoder;
-
 import java.security.SecureRandom;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
-
 import com.google.gson.GsonBuilder;
+import org.apache.ambari.server.orm.DBAccessor;
 import static org.eclipse.persistence.config.PersistenceUnitProperties.CREATE_JDBC_DDL_FILE;
 import static org.eclipse.persistence.config.PersistenceUnitProperties.CREATE_ONLY;
 import static org.eclipse.persistence.config.PersistenceUnitProperties.CREATE_OR_EXTEND;
@@ -247,6 +247,7 @@ public class ControllerModule extends AbstractModule {
     install(new FactoryModuleBuilder().build(RequestFactory.class));
 
     bind(HostRoleCommandFactory.class).to(HostRoleCommandFactoryImpl.class);
+    bind(DBAccessor.class).to(DBAccessorImpl.class);
   }
 
 }

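With this binding, upgrade code can take the accessor by injection instead of
opening its own JDBC connection. A sketch; the consuming class is hypothetical:

    import com.google.inject.Inject;
    import java.sql.SQLException;
    import org.apache.ambari.server.orm.DBAccessor;

    public class ExampleUpgradeStep { // hypothetical consumer
      @Inject
      private DBAccessor dbAccessor;

      public void run() throws SQLException {
        if (dbAccessor.tableExists("metainfo")) {
          dbAccessor.updateTable("metainfo", "metainfo_value", "1.3.0",
              "WHERE metainfo_key = 'version'");
        }
      }
    }
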
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
new file mode 100644
index 0000000..3b3d2e5
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm;
+
+import org.eclipse.persistence.sessions.DatabaseSession;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.List;
+
+/**
+ * Interface for schema manipulation
+ * Note: IF NOT EXISTS is default for all supported DDL statements
+ */
+public interface DBAccessor {
+
+  /**
+   * Create new table
+   * @param tableName
+   * @param columnInfo
+   * @param primaryKeyColumns
+   * @throws SQLException
+   */
+  public void createTable(String tableName, List<DBColumnInfo> columnInfo,
+                          String... primaryKeyColumns) throws SQLException;
+
+  /**
+   * Create new index
+   * @param indexName
+   * @param tableName
+   * @param columnNames
+   * @throws SQLException
+   */
+  public void createIndex(String indexName, String tableName,
+                          String... columnNames) throws SQLException;
+
+
+  /**
+   * Add foreign key for a relation
+   * @param tableName table containing the key column
+   * @param constraintName name for the new constraint
+   * @param keyColumn referencing column
+   * @param referenceTableName referenced table
+   * @param referenceColumn referenced column
+   * @param ignoreFailure if true, log failures instead of rethrowing
+   * @throws SQLException
+   */
+  public void addFKConstraint(String tableName,
+                              String constraintName,
+                              String keyColumn,
+                              String referenceTableName,
+                              String referenceColumn,
+                              boolean ignoreFailure) throws SQLException;
+
+  /**
+   * Add foreign key for a relation
+   * @param tableName table containing the key columns
+   * @param constraintName name for the new constraint
+   * @param keyColumns referencing columns
+   * @param referenceTableName referenced table
+   * @param referenceColumns referenced columns
+   * @param ignoreFailure if true, log failures instead of rethrowing
+   * @throws SQLException
+   */
+  public void addFKConstraint(String tableName,
+                              String constraintName,
+                              String[] keyColumns,
+                              String referenceTableName,
+                              String[] referenceColumns,
+                              boolean ignoreFailure) throws SQLException;
+
+  /**
+   * Add column to existing table
+   * @param tableName
+   * @param columnInfo
+   * @throws SQLException
+   */
+  public void addColumn(String tableName,
+                        DBColumnInfo columnInfo) throws SQLException;
+
+  /**
+   * Rename existing column
+   * @param tableName
+   * @param oldColumnName
+   * @param columnInfo
+   * @throws SQLException
+   */
+  void renameColumn(String tableName, String oldColumnName,
+                    DBColumnInfo columnInfo) throws SQLException;
+
+  /**
+   * Alter a column in an existing table; only varchar length extension is supported. <br/>
+   * For more complex changes use the following sequence: <br/>
+   * <li/>{@link #addColumn(String, org.apache.ambari.server.orm.DBAccessor.DBColumnInfo)}
+   * <li/>{@link #updateTable(String, String, Object, String)}
+   * <li/>{@link #dropColumn(String, String)}
+   * <li/>{@link #renameColumn(String, String, org.apache.ambari.server.orm.DBAccessor.DBColumnInfo)}
+   * @param tableName
+   * @param columnInfo
+   * @throws SQLException
+   */
+  public void alterColumn(String tableName,
+                          DBColumnInfo columnInfo) throws SQLException;
+
+  /**
+   * Insert row into table
+   *
+   * @param tableName
+   * @param columnNames
+   * @param values
+   * @param ignoreFailure
+   * @return
+   * @throws SQLException
+   */
+  boolean insertRow(String tableName, String[] columnNames, String[] values, boolean ignoreFailure) throws SQLException;
+
+  /**
+   * Simple update operation on table
+   * @param tableName
+   * @param columnName
+   * @param value
+   * @param whereClause
+   * @return
+   * @throws SQLException
+   */
+  public int updateTable(String tableName, String columnName, Object value,
+                         String whereClause) throws SQLException;
+
+  /**
+   * Helper method to run third party scripts like Quartz DDL
+   * @param filePath
+   * @throws SQLException
+   */
+  public void executeScript(String filePath) throws SQLException, IOException;
+
+
+  /**
+   * Execute ad-hoc query on DB.
+   * @param query
+   * @throws SQLException
+   */
+  public void executeQuery(String query) throws SQLException;
+
+  /**
+   * Execute query on DB
+   * @param query
+   * @param ignoreFailure
+   * @throws SQLException
+   */
+  void executeQuery(String query, boolean ignoreFailure) throws SQLException;
+
+  /**
+   * Drop table from schema
+   * @param tableName
+   * @throws SQLException
+   */
+  public void dropTable(String tableName) throws SQLException;
+
+  /**
+   * Delete all table data
+   * @param tableName
+   * @throws SQLException
+   */
+  public void truncateTable(String tableName) throws SQLException;
+
+  /**
+   * Drop a column from table
+   * @param tableName
+   * @param columnName
+   * @throws SQLException
+   */
+  public void dropColumn(String tableName, String columnName) throws SQLException;
+
+  /**
+   * Drop sequence
+   * @param sequenceName
+   * @throws SQLException
+   */
+  public void dropSequence(String sequenceName) throws SQLException;
+
+  /**
+   * Drop a constraint from table
+   * @param tableName
+   * @param constraintName
+   * @throws SQLException
+   */
+  public void dropConstraint(String tableName, String constraintName) throws SQLException;
+
+  /**
+   * Verify if table exists by looking at metadata.
+   * @param tableName
+   * @return
+   * @throws SQLException
+   */
+  public boolean tableExists(String tableName) throws SQLException;
+
+  /**
+   * Verify if table has any data
+   * @param tableName
+   * @return
+   * @throws SQLException
+   */
+  public boolean tableHasData(String tableName) throws SQLException;
+
+  /**
+   * Verify if table already has a column defined.
+   * @param tableName
+   * @param columnName
+   * @return
+   * @throws SQLException
+   */
+  public boolean tableHasColumn(String tableName, String columnName) throws SQLException;
+
+  /**
+   * Verify if table already has a FK constraint.
+   * @param tableName
+   * @param refTableName
+   * @param columnName
+   * @param refColumnName
+   * @return
+   * @throws SQLException
+   */
+  public boolean tableHasForeignKey(String tableName, String refTableName,
+             String columnName, String refColumnName) throws SQLException;
+
+  /**
+   * Get a new DB session
+   * @return
+   */
+  public DatabaseSession getNewDatabaseSession();
+
+  /**
+   * Capture column type
+   */
+  public class DBColumnInfo {
+    private String name;
+    private Class type;
+    private Integer length;
+    private Object defaultValue;
+    private boolean isNullable;
+
+    public DBColumnInfo(String name, Class type, Integer length) {
+      this(name, type, length, null, false);
+    }
+
+    public DBColumnInfo(String name, Class type, Integer length,
+                        Object defaultValue, boolean nullable) {
+      this.name = name;
+      this.type = type;
+      this.length = length;
+      this.defaultValue = defaultValue;
+      isNullable = nullable;
+    }
+
+    public String getName() {
+      return name;
+    }
+
+    public void setName(String name) {
+      this.name = name;
+    }
+
+    public Class getType() {
+      return type;
+    }
+
+    public void setType(Class type) {
+      this.type = type;
+    }
+
+    public Integer getLength() {
+      return length;
+    }
+
+    public void setLength(Integer length) {
+      this.length = length;
+    }
+
+    public Object getDefaultValue() {
+      return defaultValue;
+    }
+
+    public void setDefaultValue(Object defaultValue) {
+      this.defaultValue = defaultValue;
+    }
+
+    public boolean isNullable() {
+      return isNullable;
+    }
+
+    public void setNullable(boolean nullable) {
+      isNullable = nullable;
+    }
+
+    public enum DBColumnType {
+      VARCHAR,
+      CHAR,
+      INT,
+      LONG,
+      BOOL,
+      TIME,
+      BLOB
+    }
+  }
+
+}

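For orientation, a typical call sequence against this interface, the way an
upgrade catalog would drive it (table and column names are invented):

    import java.sql.SQLException;
    import java.util.Arrays;
    import org.apache.ambari.server.orm.DBAccessor;
    import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;

    void example(DBAccessor dbAccessor) throws SQLException {
      // Create the table only if absent (IF NOT EXISTS semantics, per the note above)
      dbAccessor.createTable("example_table",
          Arrays.asList(new DBColumnInfo("id", Long.class, null),
                        new DBColumnInfo("name", String.class, 255, null, false)),
          "id"); // primary key column(s)
      // Columns with a default value are added nullable, then backfilled
      dbAccessor.addColumn("example_table",
          new DBColumnInfo("status", String.class, 32, "UNKNOWN", true));
    }
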
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
new file mode 100644
index 0000000..e30d4f1
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
@@ -0,0 +1,455 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm;
+
+import com.google.inject.Inject;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.orm.helpers.ScriptRunner;
+import org.apache.ambari.server.orm.helpers.dbms.*;
+import org.eclipse.persistence.internal.helper.DBPlatformHelper;
+import org.eclipse.persistence.internal.sessions.DatabaseSessionImpl;
+import org.eclipse.persistence.logging.AbstractSessionLog;
+import org.eclipse.persistence.logging.SessionLogEntry;
+import org.eclipse.persistence.platform.database.*;
+import org.eclipse.persistence.sessions.DatabaseLogin;
+import org.eclipse.persistence.sessions.DatabaseSession;
+import org.eclipse.persistence.sessions.Login;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.FileReader;
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+public class DBAccessorImpl implements DBAccessor {
+  private static final Logger LOG = LoggerFactory.getLogger(DBAccessorImpl.class);
+  private final DatabasePlatform databasePlatform;
+  private final Connection connection;
+  private final DbmsHelper dbmsHelper;
+  private Configuration configuration;
+  private DatabaseMetaData databaseMetaData;
+
+  @Inject
+  public DBAccessorImpl(Configuration configuration) {
+    this.configuration = configuration;
+
+    try {
+      Class.forName(configuration.getDatabaseDriver());
+
+      connection = DriverManager.getConnection(configuration.getDatabaseUrl(),
+        configuration.getDatabaseUser(),
+        configuration.getDatabasePassword());
+
+      //TODO create own mapping and platform classes for supported databases
+      String vendorName = connection.getMetaData().getDatabaseProductName() +
+        connection.getMetaData().getDatabaseMajorVersion();
+      String dbPlatform = DBPlatformHelper.getDBPlatform(vendorName, new AbstractSessionLog() {
+        @Override
+        public void log(SessionLogEntry sessionLogEntry) {
+          LOG.debug(sessionLogEntry.getMessage());
+        }
+      });
+      this.databasePlatform = (DatabasePlatform) Class.forName(dbPlatform).newInstance();
+      this.dbmsHelper = loadHelper(databasePlatform);
+    } catch (Exception e) {
+      String message = "Error while creating database accessor";
+      LOG.error(message, e);
+      throw new RuntimeException(message, e);
+    }
+  }
+
+  protected DbmsHelper loadHelper(DatabasePlatform databasePlatform) {
+    if (databasePlatform instanceof OraclePlatform) {
+      return new OracleHelper(databasePlatform);
+    } else if (databasePlatform instanceof MySQLPlatform) {
+      return new MySqlHelper(databasePlatform);
+    } else if (databasePlatform instanceof PostgreSQLPlatform) {
+      return new PostgresHelper(databasePlatform);
+    } else if (databasePlatform instanceof DerbyPlatform) {
+      return new DerbyHelper(databasePlatform);
+    } else {
+      return new GenericDbmsHelper(databasePlatform);
+    }
+  }
+
+  protected Connection getConnection() {
+    return connection;
+  }
+
+  @Override
+  public void createTable(String tableName, List<DBColumnInfo> columnInfo,
+                          String... primaryKeyColumns) throws SQLException {
+    if (!tableExists(tableName)) {
+      String query = dbmsHelper.getCreateTableStatement(tableName, columnInfo, Arrays.asList(primaryKeyColumns));
+
+      executeQuery(query);
+    }
+  }
+
+  private DatabaseMetaData getDatabaseMetaData() throws SQLException {
+    if (databaseMetaData == null) {
+      databaseMetaData = connection.getMetaData();
+    }
+
+    return databaseMetaData;
+  }
+
+  @Override
+  public boolean tableExists(String tableName) throws SQLException {
+    boolean result = false;
+    DatabaseMetaData metaData = getDatabaseMetaData();
+    String schemaFilter = null;
+    if (getDbType().equals(Configuration.ORACLE_DB_NAME)) {
+      // Optimization to not query everything
+      schemaFilter = configuration.getDatabaseUser();
+    }
+
+    ResultSet res = metaData.getTables(null, schemaFilter,
+      tableName.toUpperCase(), new String[] { "TABLE" });
+
+    if (res != null) {
+      try {
+        if (res.next()) {
+          return res.getString("TABLE_NAME") != null && res.getString
+            ("TABLE_NAME").equalsIgnoreCase(tableName);
+        }
+      } finally {
+        res.close();
+      }
+    }
+
+    return result;
+  }
+
+  protected String getDbType() {
+    String dbUrl = configuration.getDatabaseUrl();
+    String dbType;
+
+    if (dbUrl.contains(Configuration.POSTGRES_DB_NAME)) {
+      dbType = Configuration.POSTGRES_DB_NAME;
+    } else if (dbUrl.contains(Configuration.ORACLE_DB_NAME)) {
+      dbType = Configuration.ORACLE_DB_NAME;
+    } else if (dbUrl.contains(Configuration.MYSQL_DB_NAME)) {
+      dbType = Configuration.MYSQL_DB_NAME;
+    } else if (dbUrl.contains(Configuration.DERBY_DB_NAME)) {
+      dbType = Configuration.DERBY_DB_NAME;
+    } else {
+      throw new RuntimeException("Unable to determine database type.");
+    }
+
+    return dbType;
+  }
+
+  @Override
+  public boolean tableHasData(String tableName) throws SQLException {
+    String query = "SELECT count(*) from " + tableName;
+    Statement statement = getConnection().createStatement();
+    ResultSet rs = statement.executeQuery(query);
+    boolean retVal = false;
+    if (rs != null) {
+      if (rs.next()) {
+        return rs.getInt(1) > 0; // JDBC result set columns are 1-based
+      }
+    }
+    return retVal;
+  }
+
+  @Override
+  public boolean tableHasColumn(String tableName, String columnName) throws SQLException {
+    boolean result = false;
+    DatabaseMetaData metaData = getDatabaseMetaData();
+    String schemaFilter = null;
+    if (getDbType().equals(Configuration.ORACLE_DB_NAME)) {
+      // Optimization to not query everything
+      schemaFilter = configuration.getDatabaseUser();
+    }
+
+    ResultSet rs = metaData.getColumns(null, schemaFilter,
+        tableName.toUpperCase(), columnName.toUpperCase());
+
+    if (rs != null) {
+      try {
+        if (rs.next()) {
+          return rs.getString("COLUMN_NAME") != null && rs.getString
+            ("COLUMN_NAME").equalsIgnoreCase(columnName);
+        }
+      } finally {
+        rs.close();
+      }
+    }
+
+    return result;
+  }
+
+  @Override
+  public boolean tableHasForeignKey(String tableName, String refTableName,
+              String columnName, String refColumnName) throws SQLException {
+    boolean result = false;
+    DatabaseMetaData metaData = getDatabaseMetaData();
+    String schemaFilter = null;
+    if (getDbType().equals(Configuration.ORACLE_DB_NAME)) {
+      // Optimization to not query everything
+      schemaFilter = configuration.getDatabaseUser();
+    }
+
+    ResultSet rs = metaData.getCrossReference(null, schemaFilter, tableName,
+      null, schemaFilter, refTableName);
+
+    if (rs != null) {
+      try {
+        if (rs.next()) {
+          String refColumn = rs.getString("FKCOLUMN_NAME");
+          result = refColumn != null && refColumn.equalsIgnoreCase(refColumnName);
+        }
+      } finally {
+        rs.close();
+      }
+    }
+
+    return result;
+  }
+
+  @Override
+  public void createIndex(String indexName, String tableName,
+                          String... columnNames) throws SQLException {
+    String query = dbmsHelper.getCreateIndexStatement(indexName, tableName, columnNames);
+
+    executeQuery(query);
+  }
+
+  @Override
+  public void addFKConstraint(String tableName, String constraintName,
+                              String keyColumn, String referenceTableName,
+                              String referenceColumn, boolean ignoreFailure) throws SQLException {
+
+    if (!tableHasForeignKey(tableName, referenceTableName, keyColumn, referenceColumn)) {
+
+      String query = dbmsHelper.getAddForeignKeyStatement(tableName, constraintName,
+        Collections.singletonList(keyColumn),
+        referenceTableName,
+        Collections.singletonList(referenceColumn)
+      );
+
+      try {
+        executeQuery(query);
+      } catch (SQLException e) {
+        LOG.warn("Add FK constraint failed" +
+          ", constraintName = " + constraintName +
+          ", tableName = " + tableName, e);
+        if (!ignoreFailure) {
+          throw e;
+        }
+      }
+    }
+  }
+
+  @Override
+  public void addFKConstraint(String tableName, String constraintName,
+                              String[] keyColumns, String referenceTableName,
+                              String[] referenceColumns, boolean ignoreFailure) throws SQLException {
+    String query = dbmsHelper.getAddForeignKeyStatement(tableName, constraintName,
+        Arrays.asList(keyColumns),
+        referenceTableName,
+        Arrays.asList(referenceColumns)
+    );
+
+    try {
+      executeQuery(query);
+    } catch (SQLException e) {
+      LOG.warn("Add FK constraint failed" +
+              ", constraintName = " + constraintName +
+              ", tableName = " + tableName, e);
+      if (!ignoreFailure) {
+        throw e;
+      }
+    }
+  }
+
+  @Override
+  public void renameColumn(String tableName, String oldColumnName,
+                           DBColumnInfo columnInfo) throws SQLException {
+    // For MySQL it is mandatory to specify the column type in the change clause
+    String renameColumnStatement = dbmsHelper.getRenameColumnStatement(tableName, oldColumnName, columnInfo);
+    executeQuery(renameColumnStatement);
+
+  }
+
+  @Override
+  public void addColumn(String tableName, DBColumnInfo columnInfo) throws SQLException {
+    if (!tableHasColumn(tableName, columnInfo.getName())) {
+      //TODO workaround for default values, possibly we will have full support later
+      if (columnInfo.getDefaultValue() != null) {
+        columnInfo.setNullable(true);
+      }
+      String query = dbmsHelper.getAddColumnStatement(tableName, columnInfo);
+      executeQuery(query);
+
+      if (columnInfo.getDefaultValue() != null) {
+        updateTable(tableName, columnInfo.getName(), columnInfo.getDefaultValue(), "");
+      }
+    }
+  }
+
+  @Override
+  public void alterColumn(String tableName, DBColumnInfo columnInfo) throws SQLException {
+    // varchar length extension only (a Derby limitation; other DBs allow little more);
+    // use the addColumn-update-drop-rename sequence for anything more complex
+    String statement = dbmsHelper.getAlterColumnStatement(tableName, columnInfo);
+    executeQuery(statement);
+  }
+
+  @Override
+  public boolean insertRow(String tableName, String[] columnNames, String[] values, boolean ignoreFailure) throws SQLException {
+    StringBuilder builder = new StringBuilder();
+    builder.append("INSERT INTO ").append(tableName).append("(");
+    if (columnNames.length != values.length) {
+      throw new IllegalArgumentException("number of columns should be equal to number of values");
+    }
+
+    for (int i = 0; i < columnNames.length; i++) {
+      builder.append(columnNames[i]);
+      if (i != columnNames.length - 1) {
+        builder.append(",");
+      }
+    }
+
+    builder.append(") VALUES(");
+
+    for (int i = 0; i < values.length; i++) {
+      builder.append(values[i]);
+      if (i != values.length - 1) {
+        builder.append(",");
+      }
+    }
+
+    builder.append(")");
+
+    Statement statement = getConnection().createStatement();
+    int rowsUpdated = 0;
+    String query = builder.toString();
+    try {
+      rowsUpdated = statement.executeUpdate(query);
+    } catch (SQLException e) {
+      LOG.warn("Unable to execute query: " + query, e);
+      if (!ignoreFailure) {
+        throw e;
+      }
+    }
+
+    return rowsUpdated != 0;
+  }
+
+
+  @Override
+  public int updateTable(String tableName, String columnName, Object value,
+                         String whereClause) throws SQLException {
+
+    StringBuilder query = new StringBuilder
+      (String.format("UPDATE %s SET %s = ", tableName, columnName));
+
+    // Only String and number supported.
+    // Taken from: org.eclipse.persistence.internal.databaseaccess.appendParameterInternal
+    Object dbValue = databasePlatform.convertToDatabaseType(value);
+    String valueString = value.toString();
+    if (dbValue instanceof String) {
+      valueString = "'" + value.toString() + "'";
+    }
+
+    query.append(valueString);
+    query.append(" ");
+    query.append(whereClause);
+
+    Statement statement = getConnection().createStatement();
+
+    return statement.executeUpdate(query.toString());
+  }
+
+  @Override
+  public void executeQuery(String query) throws SQLException {
+    executeQuery(query, false);
+  }
+
+  @Override
+  public void executeQuery(String query, boolean ignoreFailure) throws SQLException {
+    Statement statement = getConnection().createStatement();
+    try {
+      statement.execute(query);
+    } catch (SQLException e) {
+      LOG.warn("Error executing query: "+query, e);
+      if (!ignoreFailure) {
+        throw e;
+      }
+    }
+  }
+
+  @Override
+  public void dropTable(String tableName) throws SQLException {
+    String query = dbmsHelper.getDropTableStatement(tableName);
+    executeQuery(query);
+  }
+
+  @Override
+  public void truncateTable(String tableName) throws SQLException {
+    String query = "DELETE FROM " + tableName;
+    executeQuery(query);
+  }
+
+  @Override
+  public void dropColumn(String tableName, String columnName) throws SQLException {
+    throw new UnsupportedOperationException("Drop column not supported");
+  }
+
+  @Override
+  public void dropSequence(String sequenceName) throws SQLException {
+    executeQuery(dbmsHelper.getDropSequenceStatement(sequenceName));
+  }
+
+  @Override
+  public void dropConstraint(String tableName, String constraintName) throws SQLException {
+    String query = dbmsHelper.getDropConstraintStatement(tableName, constraintName);
+
+    executeQuery(query);
+  }
+
+  /**
+   * Execute script with autocommit and error tolerance, like psql and sqlplus do by default
+   */
+  @Override
+  public void executeScript(String filePath) throws SQLException, IOException {
+    BufferedReader br = new BufferedReader(new FileReader(filePath));
+    try {
+      new ScriptRunner(getConnection(), true, false).runScript(br);
+    } finally {
+      br.close(); // don't leak the script file handle
+    }
+  }
+
+  @Override
+  public DatabaseSession getNewDatabaseSession() {
+    Login login = new DatabaseLogin();
+    login.setUserName(configuration.getDatabaseUser());
+    login.setPassword(configuration.getDatabasePassword());
+    return new DatabaseSessionImpl(login);
+  }
+}

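One usage note on insertRow(): the statement is assembled by plain string
concatenation, so character values must arrive with their SQL quotes already
attached. For example (table and row invented):

    dbAccessor.insertRow("metainfo",
        new String[] {"metainfo_key", "metainfo_value"},
        new String[] {"'version'", "'1.3.0'"}, // note the embedded single quotes
        true);                                 // ignoreFailure: row may already exist
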
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java
index f98df0f..f8b43a8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java
@@ -26,6 +26,9 @@ import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntityPK;
 
 import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
+import java.util.List;
 
 @Singleton
 public class HostComponentDesiredStateDAO {
@@ -62,4 +65,14 @@ public class HostComponentDesiredStateDAO {
     remove(findByPK(primaryKey));
   }
 
+  @Transactional
+  public List<HostComponentDesiredStateEntity> findAll() {
+    TypedQuery<HostComponentDesiredStateEntity> query = entityManagerProvider.get()
+      .createQuery("SELECT hcd from HostComponentDesiredStateEntity hcd", HostComponentDesiredStateEntity.class);
+    // getResultList() returns an empty list when nothing matches;
+    // NoResultException is only thrown by getSingleResult()
+    return query.getResultList();
+  }
 }

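The new findAll() methods let upgrade code walk every row through JPA instead of
hand-written DML. A sketch of the intended use; the entity setter and version
string are assumptions:

    @Inject
    private HostComponentDesiredStateDAO hostComponentDesiredStateDAO;

    @Transactional
    void updateDesiredStackVersions(String newStackId) {
      for (HostComponentDesiredStateEntity entity
          : hostComponentDesiredStateDAO.findAll()) {
        entity.setDesiredStackVersion(newStackId); // assumed setter on the entity
      }
    }
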
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java
index 06d97d9..697d5f0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java
@@ -26,6 +26,9 @@ import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntityPK;
 
 import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
+import java.util.List;
 
 @Singleton
 public class HostComponentStateDAO {
@@ -62,4 +65,14 @@ public class HostComponentStateDAO {
     remove(findByPK(primaryKey));
   }
 
+  @Transactional
+  public List<HostComponentStateEntity> findAll() {
+    TypedQuery<HostComponentStateEntity> query = entityManagerProvider.get()
+      .createQuery("SELECT hsc from HostComponentStateEntity hsc", HostComponentStateEntity.class);
+    // getResultList() returns an empty list when nothing matches;
+    // NoResultException is only thrown by getSingleResult()
+    return query.getResultList();
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java
index f062690..d1cea2e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java
@@ -26,6 +26,9 @@ import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntityP
 import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
 
 import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
+import java.util.List;
 
 @Singleton
 public class ServiceComponentDesiredStateDAO {
@@ -62,4 +65,15 @@ public class ServiceComponentDesiredStateDAO {
     remove(findByPK(primaryKey));
   }
 
+  @Transactional
+  public List<ServiceComponentDesiredStateEntity> findAll() {
+    TypedQuery<ServiceComponentDesiredStateEntity> query =
+      entityManagerProvider.get().
+        createQuery("SELECT scd from ServiceComponentDesiredStateEntity scd", ServiceComponentDesiredStateEntity.class);
+    // getResultList() returns an empty list when nothing matches;
+    // NoResultException is only thrown by getSingleResult()
+    return query.getResultList();
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceDesiredStateDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceDesiredStateDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceDesiredStateDAO.java
index b6224f0..bd47213 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceDesiredStateDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceDesiredStateDAO.java
@@ -26,6 +26,9 @@ import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntityPK;
 
 import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
+import java.util.List;
 
 @Singleton
 public class ServiceDesiredStateDAO {
@@ -62,4 +65,15 @@ public class ServiceDesiredStateDAO {
     remove(findByPK(primaryKey));
   }
 
+  @Transactional
+  public List<ServiceDesiredStateEntity> findAll() {
+    TypedQuery<ServiceDesiredStateEntity> query =
+      entityManagerProvider.get().
+        createQuery("SELECT sd from ServiceDesiredStateEntity sd", ServiceDesiredStateEntity.class);
+    // getResultList() returns an empty list when nothing matches;
+    // NoResultException is only thrown by getSingleResult()
+    return query.getResultList();
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/ScriptRunner.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/ScriptRunner.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/ScriptRunner.java
new file mode 100644
index 0000000..6ab0cda
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/ScriptRunner.java
@@ -0,0 +1,254 @@
+/*
+ * Slightly modified version of the com.ibatis.common.jdbc.ScriptRunner class
+ * from the iBATIS Apache project. Only removed dependency on Resource class
+ * and a constructor
+ *
+ *
+ *
+ *  Copyright 2004 Clinton Begin
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.ambari.server.orm.helpers;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.LineNumberReader;
+import java.io.PrintWriter;
+import java.io.Reader;
+import java.sql.*;
+
+/**
+ * Tool to run database scripts
+ * TODO replace logging, deal with licence properly or rewrite completely
+ */
+public class ScriptRunner {
+  private static final Logger LOG = LoggerFactory.getLogger(ScriptRunner.class);
+
+  private static final String DEFAULT_DELIMITER = ";";
+
+  private Connection connection;
+
+  private boolean stopOnError;
+  private boolean autoCommit;
+
+  private PrintWriter logWriter = new PrintWriter(System.out);
+  private PrintWriter errorLogWriter = new PrintWriter(System.err);
+
+  private String delimiter = DEFAULT_DELIMITER;
+  private boolean fullLineDelimiter = false;
+
+  /**
+   * Default constructor
+   */
+  public ScriptRunner(Connection connection, boolean autoCommit,
+                      boolean stopOnError) {
+    this.connection = connection;
+    this.autoCommit = autoCommit;
+    this.stopOnError = stopOnError;
+  }
+
+  public void setDelimiter(String delimiter, boolean fullLineDelimiter) {
+    this.delimiter = delimiter;
+    this.fullLineDelimiter = fullLineDelimiter;
+  }
+
+  /**
+   * Setter for logWriter property
+   *
+   * @param logWriter
+   *            - the new value of the logWriter property
+   */
+  public void setLogWriter(PrintWriter logWriter) {
+    this.logWriter = logWriter;
+  }
+
+  /**
+   * Setter for errorLogWriter property
+   *
+   * @param errorLogWriter
+   *            - the new value of the errorLogWriter property
+   */
+  public void setErrorLogWriter(PrintWriter errorLogWriter) {
+    this.errorLogWriter = errorLogWriter;
+  }
+
+  /**
+   * Runs an SQL script (read in using the Reader parameter)
+   *
+   * @param reader
+   *            - the source of the script
+   */
+  public void runScript(Reader reader) throws IOException, SQLException {
+    try {
+      boolean originalAutoCommit = connection.getAutoCommit();
+      try {
+        if (originalAutoCommit != this.autoCommit) {
+          connection.setAutoCommit(this.autoCommit);
+        }
+        runScript(connection, reader);
+      } finally {
+        connection.setAutoCommit(originalAutoCommit);
+      }
+    } catch (IOException e) {
+      throw e;
+    } catch (SQLException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new RuntimeException("Error running script.  Cause: " + e, e);
+    }
+  }
+
+  /**
+   * Runs an SQL script (read in using the Reader parameter) using the
+   * connection passed in
+   *
+   * @param conn
+   *            - the connection to use for the script
+   * @param reader
+   *            - the source of the script
+   * @throws SQLException
+   *             if any SQL errors occur
+   * @throws IOException
+   *             if there is an error reading from the Reader
+   */
+  private void runScript(Connection conn, Reader reader) throws IOException,
+    SQLException {
+    StringBuffer command = null;
+    try {
+      LineNumberReader lineReader = new LineNumberReader(reader);
+      String line = null;
+      while ((line = lineReader.readLine()) != null) {
+        if (command == null) {
+          command = new StringBuffer();
+        }
+        String trimmedLine = line.trim();
+        if (trimmedLine.startsWith("--")) {
+          println(trimmedLine);
+        } else if (trimmedLine.length() < 1
+          || trimmedLine.startsWith("//")) {
+          // Do nothing
+        } else if (trimmedLine.length() < 1
+          || trimmedLine.startsWith("--")) {
+          // Do nothing
+        } else if (!fullLineDelimiter
+          && trimmedLine.endsWith(getDelimiter())
+          || fullLineDelimiter
+          && trimmedLine.equals(getDelimiter())) {
+          command.append(line.substring(0, line
+            .lastIndexOf(getDelimiter())));
+          command.append(" ");
+          Statement statement = conn.createStatement();
+
+          println(command);
+
+          boolean hasResults = false;
+          if (stopOnError) {
+            hasResults = statement.execute(command.toString());
+          } else {
+            try {
+              statement.execute(command.toString());
+            } catch (SQLException e) {
+              e.fillInStackTrace();
+              printlnError("Error executing: " + command);
+              printlnError(e);
+            }
+          }
+
+          if (autoCommit && !conn.getAutoCommit()) {
+            conn.commit();
+          }
+
+          ResultSet rs = statement.getResultSet();
+          if (hasResults && rs != null) {
+            ResultSetMetaData md = rs.getMetaData();
+            int cols = md.getColumnCount();
+            for (int i = 1; i <= cols; i++) { // JDBC columns are 1-based
+              String name = md.getColumnLabel(i);
+              print(name + "\t");
+            }
+            println("");
+            while (rs.next()) {
+              for (int i = 1; i <= cols; i++) {
+                String value = rs.getString(i);
+                print(value + "\t");
+              }
+              println("");
+            }
+          }
+
+          command = null;
+          try {
+            statement.close();
+          } catch (Exception e) {
+            // Ignore to workaround a bug in Jakarta DBCP
+          }
+          Thread.yield();
+        } else {
+          command.append(line);
+          command.append(" ");
+        }
+      }
+      if (!autoCommit) {
+        conn.commit();
+      }
+    } catch (SQLException e) {
+      e.fillInStackTrace();
+      printlnError("Error executing: " + command);
+      printlnError(e);
+      throw e;
+    } catch (IOException e) {
+      e.fillInStackTrace();
+      printlnError("Error executing: " + command);
+      printlnError(e);
+      throw e;
+    } finally {
+      conn.rollback();
+      flush();
+    }
+  }
+
+  private String getDelimiter() {
+    return delimiter;
+  }
+
+  private void print(Object o) {
+    if (logWriter != null) {
+      logWriter.print(o); // honor the configured writer instead of System.out
+    }
+  }
+
+  private void println(Object o) {
+    if (logWriter != null) {
+      logWriter.println(o);
+    }
+  }
+
+  private void printlnError(Object o) {
+    if (errorLogWriter != null) {
+      errorLogWriter.println(o);
+    }
+  }
+
+  private void flush() {
+    if (logWriter != null) {
+      logWriter.flush();
+    }
+    if (errorLogWriter != null) {
+      errorLogWriter.flush();
+    }
+  }
+}
\ No newline at end of file

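DBAccessorImpl.executeScript() above is the in-tree caller, but the runner is
self-contained. A standalone sketch; the JDBC URL and script path are placeholders:

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import org.apache.ambari.server.orm.helpers.ScriptRunner;

    public class RunDdl {
      public static void main(String[] args) throws Exception {
        Connection conn =
            DriverManager.getConnection("jdbc:derby:memory:ambari;create=true");
        try {
          // autoCommit=true, stopOnError=false: tolerate errors like psql/sqlplus do
          ScriptRunner runner = new ScriptRunner(conn, true, false);
          BufferedReader reader = new BufferedReader(new FileReader("/tmp/upgrade.sql"));
          try {
            runner.runScript(reader);
          } finally {
            reader.close();
          }
        } finally {
          conn.close();
        }
      }
    }
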
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java
new file mode 100644
index 0000000..2e66006
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.orm.helpers.dbms;
+
+import org.apache.ambari.server.orm.DBAccessor;
+
+import java.util.List;
+
+public interface DbmsHelper {
+
+  /**
+   * Check if column type can be modified directly
+   * @return
+   */
+  boolean supportsColumnTypeChange();
+
+  /**
+   * Generate rename column statement
+   * @param tableName
+   * @param oldName
+   * @param columnInfo definition of new column
+   * @return
+   */
+  String getRenameColumnStatement(String tableName, String oldName, DBAccessor.DBColumnInfo columnInfo);
+
+  /**
+   * Generate alter column statement
+   * @param tableName
+   * @param columnInfo
+   * @return
+   */
+  String getAlterColumnStatement(String tableName, DBAccessor.DBColumnInfo columnInfo);
+
+  String getCreateTableStatement(String tableName,
+                                 List<DBAccessor.DBColumnInfo> columns,
+                                 List<String> primaryKeyColumns);
+
+  String getCreateIndexStatement(String indexName, String tableName,
+                                 String... columnNames);
+
+  String getAddForeignKeyStatement(String tableName, String constraintName,
+                                   List<String> keyColumns,
+                                   String referenceTableName,
+                                   List<String> referenceColumns);
+
+  String getAddColumnStatement(String tableName, DBAccessor.DBColumnInfo columnInfo);
+
+  String getRenameColumnStatement(String tableName, String oldColumnName,
+                                  String newColumnName);
+
+  String getDropTableStatement(String tableName);
+
+  String getDropConstraintStatement(String tableName, String constraintName);
+
+  String getDropSequenceStatement(String sequenceName);
+}

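These helpers are pure statement generators; nothing executes until DBAccessorImpl
runs the returned string. A sketch of direct use, assuming an EclipseLink
DatabasePlatform instance named platform is in scope:

    import java.util.Arrays;
    import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;

    DbmsHelper helper = new GenericDbmsHelper(platform);
    String ddl = helper.getCreateTableStatement("example_table",
        Arrays.asList(new DBColumnInfo("id", Long.class, null),
                      new DBColumnInfo("name", String.class, 255)),
        Arrays.asList("id"));
    // ddl now holds a CREATE TABLE statement; hand it to DBAccessor.executeQuery()
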
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DerbyHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DerbyHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DerbyHelper.java
new file mode 100644
index 0000000..00d4b86
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DerbyHelper.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.orm.helpers.dbms;
+
+import org.apache.ambari.server.orm.DBAccessor;
+import org.eclipse.persistence.platform.database.DatabasePlatform;
+
+public class DerbyHelper extends GenericDbmsHelper {
+  public DerbyHelper(DatabasePlatform databasePlatform) {
+    super(databasePlatform);
+  }
+
+  @Override
+  public boolean supportsColumnTypeChange() {
+    return false; //type change is dramatically limited to varchar length increase only, almost useless
+  }
+
+  @Override
+  public String getRenameColumnStatement(String tableName, String oldName, DBAccessor.DBColumnInfo columnInfo) {
+    StringBuilder builder = new StringBuilder();
+
+    builder.append("RENAME COLUMN ").append(tableName).append(".").append(oldName);
+    builder.append(" TO ").append(columnInfo.getName());
+
+    return builder.toString();
+  }
+
+  @Override
+  public StringBuilder writeColumnModifyString(StringBuilder builder, DBAccessor.DBColumnInfo columnInfo) {
+    builder.append(" ALTER COLUMN ").append(columnInfo.getName())
+      .append(" SET DATA TYPE ");
+    writeColumnType(builder, columnInfo);
+
+    return builder;
+  }
+}

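For reference, the rename statement this helper renders, with invented table and
column names:

    DerbyHelper helper = new DerbyHelper(platform); // platform assumed in scope
    String sql = helper.getRenameColumnStatement("hosts", "host_name",
        new DBAccessor.DBColumnInfo("host_name_new", String.class, 255));
    // sql is now: RENAME COLUMN hosts.host_name TO host_name_new
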
http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
new file mode 100644
index 0000000..5f484a8
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.orm.helpers.dbms;
+
+import org.apache.ambari.server.orm.DBAccessor;
+import org.eclipse.persistence.internal.databaseaccess.FieldTypeDefinition;
+import org.eclipse.persistence.internal.databaseaccess.Platform;
+import org.eclipse.persistence.internal.sessions.AbstractSession;
+import org.eclipse.persistence.platform.database.DatabasePlatform;
+import org.eclipse.persistence.tools.schemaframework.FieldDefinition;
+import org.eclipse.persistence.tools.schemaframework.ForeignKeyConstraint;
+import org.eclipse.persistence.tools.schemaframework.TableDefinition;
+
+import java.io.IOException;
+import java.io.StringWriter;
+import java.io.Writer;
+import java.util.List;
+
+public class GenericDbmsHelper implements DbmsHelper {
+  protected final DatabasePlatform databasePlatform;
+
+  public GenericDbmsHelper(DatabasePlatform databasePlatform) {
+    this.databasePlatform = databasePlatform;
+  }
+
+  @Override
+  public boolean supportsColumnTypeChange() {
+    return false;
+  }
+
+  @Override
+  public String getRenameColumnStatement(String tableName, String oldName, DBAccessor.DBColumnInfo columnInfo) {
+    StringBuilder stringBuilder = new StringBuilder();
+
+    writeAlterTableClause(stringBuilder, tableName);
+    writeColumnRenameString(stringBuilder, oldName, columnInfo);
+
+    return stringBuilder.toString();
+  }
+
+  @Override
+  public String getAlterColumnStatement(String tableName, DBAccessor.DBColumnInfo columnInfo) {
+    StringBuilder stringBuilder = new StringBuilder();
+    writeAlterTableClause(stringBuilder, tableName);
+    writeColumnModifyString(stringBuilder, columnInfo);
+
+    return stringBuilder.toString();
+  }
+
+  public StringBuilder writeAlterTableClause(StringBuilder builder, String tableName) {
+    builder.append("ALTER TABLE ").append(tableName).append(" ");
+    return builder;
+  }
+
+  public StringBuilder writeColumnModifyString(StringBuilder builder, DBAccessor.DBColumnInfo columnInfo) {
+    throw new UnsupportedOperationException("Column type modification not supported for generic DB");
+  }
+
+  public StringBuilder writeColumnRenameString(StringBuilder builder, String oldName, DBAccessor.DBColumnInfo newColumnInfo) {
+    throw new UnsupportedOperationException("Column rename not supported for generic DB");
+  }
+
+  public StringBuilder writeColumnType(StringBuilder builder, DBAccessor.DBColumnInfo columnInfo) {
+    FieldTypeDefinition fieldType;
+
+    fieldType = databasePlatform.getFieldTypeDefinition(columnInfo.getType());
+
+    if (fieldType == null) {
+      throw new IllegalArgumentException("Unable to convert data type");
+    }
+
+    FieldDefinition definition = convertToFieldDefinition(columnInfo);
+
+    StringWriter writer = new StringWriter();
+
+    try {
+      databasePlatform.printFieldTypeSize(writer, definition, fieldType, false); //Ambari doesn't use identity fields
+    } catch (IOException ignored) {
+      // no writing to file
+    }
+
+    builder.append(writer.toString());
+
+    return builder;
+  }
+
+  /**
+   * get create table statement
+   * @param tableName
+   * @param columns
+   * @param primaryKeyColumns
+   * @return
+   */
+  @Override
+  public String getCreateTableStatement(String tableName,
+                                        List<DBAccessor.DBColumnInfo> columns,
+                                        List<String> primaryKeyColumns) {
+    Writer stringWriter = new StringWriter();
+    writeCreateTableStatement(stringWriter, tableName, columns, primaryKeyColumns);
+    return stringWriter.toString();
+  }
+
+
+  /**
+   * Write create table statement to writer
+   * TODO default Value of column not supported
+   */
+  public Writer writeCreateTableStatement(Writer writer, String tableName,
+                                          List<DBAccessor.DBColumnInfo> columns,
+                                          List<String> primaryKeyColumns) {
+    //TODO validateNames(String tableName, List<DBAccessor.DBColumnInfo> columns)
+    //TODO validatePKNames(List<DBAccessor.DBColumnInfo> columns, String... primaryKeyColumns)
+
+    TableDefinition tableDefinition = new TableDefinition();
+    tableDefinition.setName(tableName);
+    for (DBAccessor.DBColumnInfo columnInfo : columns) {
+      //TODO: column names are treated as case-sensitive for now
+      int length = columnInfo.getLength() != null ? columnInfo.getLength() : 0;
+
+      if (primaryKeyColumns.contains(columnInfo.getName())) {
+        tableDefinition.addIdentityField(columnInfo.getName(), columnInfo.getType(), length);
+      } else {
+        FieldDefinition fieldDefinition = convertToFieldDefinition(columnInfo);
+        tableDefinition.addField(fieldDefinition);
+      }
+    }
+
+    //TODO possibly move code to avoid unnecessary dependencies and allow extension
+    tableDefinition.buildCreationWriter(createStubAbstractSessionFromPlatform(databasePlatform), writer);
+
+    return writer;
+  }
+
+  public FieldDefinition convertToFieldDefinition(DBAccessor.DBColumnInfo columnInfo) {
+    int length = columnInfo.getLength() != null ? columnInfo.getLength() : 0;
+    FieldDefinition fieldDefinition = new FieldDefinition(columnInfo.getName(), columnInfo.getType(), length);
+    fieldDefinition.setShouldAllowNull(columnInfo.isNullable());
+    return fieldDefinition;
+  }
+
+  /**
+   * Build a CREATE INDEX statement.
+   * @param indexName name of the index to create
+   * @param tableName table to index
+   * @param columnNames columns covered by the index
+   * @return the generated CREATE INDEX statement
+   */
+  @Override
+  public String getCreateIndexStatement(String indexName, String tableName,
+                                        String... columnNames) {
+    //TODO validateColumnNames()
+    return databasePlatform.buildCreateIndex(tableName, indexName, columnNames);
+  }
+
+
+  @Override
+  public String getAddForeignKeyStatement(String tableName, String constraintName,
+                                          List<String> keyColumns,
+                                          String referenceTableName,
+                                          List<String> referenceColumns) {
+    ForeignKeyConstraint foreignKeyConstraint = new ForeignKeyConstraint();
+    foreignKeyConstraint.setName(constraintName);
+    foreignKeyConstraint.setTargetTable(referenceTableName);
+    foreignKeyConstraint.setSourceFields(keyColumns);
+    foreignKeyConstraint.setTargetFields(referenceColumns);
+
+    TableDefinition tableDefinition = new TableDefinition();
+    tableDefinition.setName(tableName);
+
+    Writer writer = new StringWriter();
+    tableDefinition.buildConstraintCreationWriter(createStubAbstractSessionFromPlatform(databasePlatform),
+        foreignKeyConstraint, writer);
+
+    return writer.toString();
+  }
+
+  @Override
+  public String getAddColumnStatement(String tableName, DBAccessor.DBColumnInfo columnInfo) {
+    Writer writer = new StringWriter();
+
+    TableDefinition tableDefinition = new TableDefinition();
+    tableDefinition.setName(tableName);
+    tableDefinition.buildAddFieldWriter(createStubAbstractSessionFromPlatform(databasePlatform),
+        convertToFieldDefinition(columnInfo), writer);
+
+    return writer.toString();
+  }
+
+
+  @Override
+  public String getRenameColumnStatement(String tableName, String oldColumnName,
+                                         String newColumnName) {
+    throw new UnsupportedOperationException("Rename operation not supported.");
+  }
+
+  @Override
+  public String getDropTableStatement(String tableName) {
+    Writer writer = new StringWriter();
+
+    TableDefinition tableDefinition = new TableDefinition();
+    tableDefinition.setName(tableName);
+    tableDefinition.buildDeletionWriter
+        (createStubAbstractSessionFromPlatform(databasePlatform), writer);
+
+    return writer.toString();
+  }
+
+  @Override
+  public String getDropConstraintStatement(String tableName, String constraintName) {
+    Writer writer = new StringWriter();
+
+    ForeignKeyConstraint foreignKeyConstraint = new ForeignKeyConstraint();
+    foreignKeyConstraint.setName(constraintName);
+    foreignKeyConstraint.setTargetTable(tableName);
+
+    TableDefinition tableDefinition = new TableDefinition();
+    tableDefinition.setName(tableName);
+    tableDefinition.buildConstraintDeletionWriter(
+        createStubAbstractSessionFromPlatform(databasePlatform),
+        foreignKeyConstraint, writer);
+
+    return writer.toString();
+  }
+
+  @Override
+  public String getDropSequenceStatement(String sequenceName) {
+    StringWriter writer = new StringWriter();
+    String defaultStmt = String.format("DROP SEQUENCE %s", sequenceName);
+
+    try {
+      Writer w = databasePlatform.buildSequenceObjectDeletionWriter(writer, sequenceName);
+      return w != null ? w.toString() : defaultStmt;
+    } catch (IOException e) {
+      // writing to a StringWriter cannot fail; fall back to the default statement
+    }
+    return defaultStmt;
+  }
+
+  public AbstractSession createStubAbstractSessionFromPlatform
+      (final DatabasePlatform databasePlatform) {
+    return new AbstractSession() {
+      @Override
+      public Platform getDatasourcePlatform() {
+        return databasePlatform;
+      }
+
+      @Override
+      public DatabasePlatform getPlatform() {
+        return databasePlatform;
+      }
+    };
+  }
+}
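
For orientation, a minimal sketch of how a DbmsHelper turns column metadata
into DDL text. The DBColumnInfo constructor order (name, type, length,
default, nullable) is an assumption inferred from this patch's accessors;
PostgreSQLPlatform is EclipseLink's stock platform class:

    import java.util.Arrays;
    import org.apache.ambari.server.orm.DBAccessor;
    import org.apache.ambari.server.orm.helpers.dbms.DbmsHelper;
    import org.apache.ambari.server.orm.helpers.dbms.GenericDbmsHelper;
    import org.eclipse.persistence.platform.database.PostgreSQLPlatform;

    public class DdlSketch {
      public static void main(String[] args) {
        // Assumed argument order: name, type, length, defaultValue, nullable
        DBAccessor.DBColumnInfo id =
            new DBAccessor.DBColumnInfo("id", Long.class, null, null, false);
        DBAccessor.DBColumnInfo name =
            new DBAccessor.DBColumnInfo("name", String.class, 255, null, true);

        DbmsHelper helper = new GenericDbmsHelper(new PostgreSQLPlatform());

        // Prints a statement along the lines of:
        //   CREATE TABLE users (id ..., name VARCHAR(255), PRIMARY KEY (id))
        System.out.println(helper.getCreateTableStatement(
            "users", Arrays.asList(id, name), Arrays.asList("id")));
      }
    }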

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/MySqlHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/MySqlHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/MySqlHelper.java
new file mode 100644
index 0000000..817ca9b
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/MySqlHelper.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.orm.helpers.dbms;
+
+import org.apache.ambari.server.orm.DBAccessor;
+import org.eclipse.persistence.platform.database.DatabasePlatform;
+
+public class MySqlHelper extends GenericDbmsHelper {
+  public MySqlHelper(DatabasePlatform databasePlatform) {
+    super(databasePlatform);
+  }
+
+  @Override
+  public boolean supportsColumnTypeChange() {
+    return true;
+  }
+
+  @Override
+  public StringBuilder writeColumnRenameString(StringBuilder builder, String oldName, DBAccessor.DBColumnInfo newColumnInfo) {
+    builder.append(" CHANGE ").append(oldName).append(" ").append(newColumnInfo.getName()).append(" ");
+    writeColumnType(builder, newColumnInfo);
+
+    return builder;
+  }
+
+  @Override
+  public StringBuilder writeColumnModifyString(StringBuilder builder, DBAccessor.DBColumnInfo columnInfo) {
+    builder.append(" MODIFY ").append(columnInfo.getName()).append(" ");
+    writeColumnType(builder, columnInfo);
+    return builder;
+  }
+}
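
With this helper, renaming a 255-character string column host_name to name
on table hosts would come out roughly as (exact type text depends on the
platform's field-type mapping):

    ALTER TABLE hosts CHANGE host_name name VARCHAR(255)

and a type change via writeColumnModifyString as:

    ALTER TABLE hosts MODIFY name VARCHAR(255)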

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/OracleHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/OracleHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/OracleHelper.java
new file mode 100644
index 0000000..aebfb62
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/OracleHelper.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.orm.helpers.dbms;
+
+import org.apache.ambari.server.orm.DBAccessor;
+import org.eclipse.persistence.platform.database.DatabasePlatform;
+
+public class OracleHelper extends GenericDbmsHelper {
+  public OracleHelper(DatabasePlatform databasePlatform) {
+    super(databasePlatform);
+  }
+
+  @Override
+  public boolean supportsColumnTypeChange() {
+    return true;
+  }
+
+  @Override
+  public StringBuilder writeColumnRenameString(StringBuilder builder, String oldName, DBAccessor.DBColumnInfo newColumnInfo) {
+    builder.append(" RENAME COLUMN ").append(oldName).append(" TO ").append(newColumnInfo.getName());
+    return builder;
+  }
+
+  @Override
+  public StringBuilder writeColumnModifyString(StringBuilder builder, DBAccessor.DBColumnInfo columnInfo) {
+    builder.append(" MODIFY ").append(columnInfo.getName()).append(" ");
+    writeColumnType(builder, columnInfo);
+    return builder;
+  }
+}
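
For the same rename and type change, this helper would emit Oracle's forms,
roughly (VARCHAR2 assuming Oracle's default string mapping):

    ALTER TABLE hosts RENAME COLUMN host_name TO name
    ALTER TABLE hosts MODIFY name VARCHAR2(255)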

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/PostgresHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/PostgresHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/PostgresHelper.java
new file mode 100644
index 0000000..38e837e
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/PostgresHelper.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.orm.helpers.dbms;
+
+import org.apache.ambari.server.orm.DBAccessor;
+import org.eclipse.persistence.platform.database.DatabasePlatform;
+
+public class PostgresHelper extends GenericDbmsHelper {
+  public PostgresHelper(DatabasePlatform databasePlatform) {
+    super(databasePlatform);
+  }
+
+  @Override
+  public boolean supportsColumnTypeChange() {
+    return true;
+  }
+
+  @Override
+  public StringBuilder writeColumnRenameString(StringBuilder builder, String oldName, DBAccessor.DBColumnInfo newColumnInfo) {
+    builder.append(" RENAME COLUMN ").append(oldName).append(" TO ").append(newColumnInfo.getName());
+    return builder;
+  }
+
+  @Override
+  public StringBuilder writeColumnModifyString(StringBuilder builder, DBAccessor.DBColumnInfo columnInfo) {
+    builder.append(" ALTER COLUMN ").append(columnInfo.getName()).append(" TYPE ");
+    writeColumnType(builder, columnInfo);
+    return builder;
+  }
+}
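
Postgres shares Oracle's RENAME COLUMN form but changes types with
ALTER COLUMN ... TYPE, so the same modification would come out roughly as:

    ALTER TABLE hosts ALTER COLUMN name TYPE VARCHAR(255)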

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
new file mode 100644
index 0000000..dfdfa85
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.upgrade;
+
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.Provider;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.utils.VersionUtils;
+import org.apache.commons.io.FileUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.persistence.EntityManager;
+import java.io.File;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Map;
+
+public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
+  @Inject
+  protected DBAccessor dbAccessor;
+  @Inject
+  protected Configuration configuration;
+  @Inject
+  protected StackUpgradeUtil stackUpgradeUtil;
+
+  private Injector injector;
+  private static final Logger LOG = LoggerFactory.getLogger
+    (AbstractUpgradeCatalog.class);
+  private static final Map<String, UpgradeCatalog> upgradeCatalogMap =
+    new HashMap<String, UpgradeCatalog>();
+
+  @Inject
+  public AbstractUpgradeCatalog(Injector injector) {
+    this.injector = injector;
+    registerCatalog(this);
+  }
+
+  /**
+   * Every subclass registers itself here so it can be looked up by its
+   * target version.
+   */
+  protected void registerCatalog(UpgradeCatalog upgradeCatalog) {
+    upgradeCatalogMap.put(upgradeCatalog.getTargetVersion(), upgradeCatalog);
+  }
+
+  @Override
+  public String getSourceVersion() {
+    return null;
+  }
+
+  protected static UpgradeCatalog getUpgradeCatalog(String version) {
+    return upgradeCatalogMap.get(version);
+  }
+
+  protected static class VersionComparator implements Comparator<UpgradeCatalog> {
+
+    @Override
+    public int compare(UpgradeCatalog upgradeCatalog1,
+                       UpgradeCatalog upgradeCatalog2) {
+      return VersionUtils.compareVersions(upgradeCatalog1.getTargetVersion(),
+        upgradeCatalog2.getTargetVersion(), 3);
+    }
+  }
+
+  /**
+   * Read the Ambari Server version from the server version file.
+   * @return the version string, or null if the file cannot be read
+   */
+  protected String getAmbariServerVersion() {
+    String versionFilePath = configuration.getServerVersionFilePath();
+    try {
+      return FileUtils.readFileToString(new File(versionFilePath));
+    } catch (IOException e) {
+      LOG.error("Unable to read server version file: " + versionFilePath, e);
+    }
+    return null;
+  }
+
+  /**
+   * Update metainfo to new version.
+   */
+  protected int updateMetaInfoVersion(String version) {
+    int rows = 0;
+
+    if (version != null) {
+      try {
+        dbAccessor.executeQuery(String.format("INSERT INTO metainfo (metainfo_key, " +
+          "metainfo_value) VALUES ('version', '%s')", version));
+
+        rows = dbAccessor.updateTable("metainfo", "metainfo_value",
+          version, "WHERE metainfo_key = 'version'");
+      } catch (SQLException e) {
+        LOG.error("Failed updating metainfo table.", e);
+      }
+    }
+
+    return rows;
+  }
+
+  protected String getDbType() {
+    String dbUrl = configuration.getDatabaseUrl();
+    String dbType;
+
+    if (dbUrl.contains(Configuration.POSTGRES_DB_NAME)) {
+      dbType = Configuration.POSTGRES_DB_NAME;
+    } else if (dbUrl.contains(Configuration.ORACLE_DB_NAME)) {
+      dbType = Configuration.ORACLE_DB_NAME;
+    } else if (dbUrl.contains(Configuration.MYSQL_DB_NAME)) {
+      dbType = Configuration.MYSQL_DB_NAME;
+    } else {
+      throw new RuntimeException("Unable to determine database type.");
+    }
+
+    return dbType;
+  }
+
+  protected Provider<EntityManager> getEntityManagerProvider() {
+    return injector.getProvider(EntityManager.class);
+  }
+
+  protected void executeInTransaction(Runnable func) {
+    EntityManager entityManager = getEntityManagerProvider().get();
+    if (entityManager.getTransaction().isActive()) { //already started, reuse
+      func.run();
+    } else {
+      entityManager.getTransaction().begin();
+      try {
+        func.run();
+        entityManager.getTransaction().commit();
+      } catch (Exception e) {
+        entityManager.getTransaction().rollback();
+        throw new RuntimeException(e);
+      }
+    }
+  }
+
+  protected void changePostgresSearchPath() throws SQLException {
+    String dbUser = configuration.getDatabaseUser();
+    String dbName = configuration.getServerDBName();
+
+    dbAccessor.executeQuery(String.format("ALTER SCHEMA %s OWNER TO %s;", dbName, dbUser));
+
+    dbAccessor.executeQuery(String.format("ALTER ROLE %s SET search_path to '%s';", dbUser, dbName));
+  }
+
+  protected void grantAllPostgresPrivileges() throws SQLException {
+    String query = "GRANT ALL PRIVILEGES ON DATABASE ambari TO " +
+      configuration.getDatabaseUser();
+
+    dbAccessor.executeQuery(query);
+  }
+
+  @Override
+  public void upgradeSchema() throws AmbariException, SQLException {
+    if (getDbType().equals(Configuration.POSTGRES_DB_NAME)) {
+      changePostgresSearchPath();
+    }
+
+    this.executeDDLUpdates();
+  }
+
+  protected abstract void executeDDLUpdates() throws AmbariException, SQLException;
+
+  @Override
+  public String toString() {
+    return "{ " + this.getClass().getCanonicalName() +": " +
+      "sourceVersion = " + getSourceVersion() + ", " +
+      "targetVersion = " + getTargetVersion() + " }";
+  }
+}
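
To make the catalog contract concrete, a hypothetical future catalog (not
part of this patch) would look roughly like this; the DDL it runs is
illustrative only, and a real catalog would also need a binding added in
SchemaUpgradeHelper.UpgradeHelperModule:

    package org.apache.ambari.server.upgrade;

    import com.google.inject.Inject;
    import com.google.inject.Injector;
    import java.sql.SQLException;
    import org.apache.ambari.server.AmbariException;

    public class UpgradeCatalog160 extends AbstractUpgradeCatalog {

      @Inject
      public UpgradeCatalog160(Injector injector) {
        super(injector); // registers this catalog under its target version
      }

      @Override
      public String getTargetVersion() {
        return "1.6.0";
      }

      @Override
      protected void executeDDLUpdates() throws AmbariException, SQLException {
        // hypothetical schema change
        dbAccessor.executeQuery("ALTER TABLE hosts ADD rack_info VARCHAR(255)");
      }

      @Override
      public void executeDMLUpdates() throws AmbariException, SQLException {
        // record the new schema version in the metainfo table
        updateMetaInfoVersion(getTargetVersion());
      }
    }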

http://git-wip-us.apache.org/repos/asf/ambari/blob/fea7b622/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
new file mode 100644
index 0000000..5875cdb
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.upgrade;
+
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.multibindings.Multibinder;
+import com.google.inject.persist.PersistService;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.ControllerModule;
+import org.apache.ambari.server.orm.dao.MetainfoDAO;
+import org.apache.ambari.server.orm.entities.MetainfoEntity;
+import org.apache.ambari.server.utils.VersionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.InputMismatchException;
+import java.util.List;
+import java.util.Set;
+
+public class SchemaUpgradeHelper {
+  private static final Logger LOG = LoggerFactory.getLogger
+    (SchemaUpgradeHelper.class);
+
+  private Set<UpgradeCatalog> allUpgradeCatalogs;
+  private MetainfoDAO metainfoDAO;
+  private PersistService persistService;
+
+  @Inject
+  public SchemaUpgradeHelper(Set<UpgradeCatalog> allUpgradeCatalogs, MetainfoDAO metainfoDAO,
+                             PersistService persistService) {
+    this.allUpgradeCatalogs = allUpgradeCatalogs;
+    this.metainfoDAO = metainfoDAO;
+    this.persistService = persistService;
+  }
+
+  private void startPersistenceService() {
+    persistService.start();
+  }
+
+  private void stopPersistenceService() {
+    persistService.stop();
+  }
+
+  public Set<UpgradeCatalog> getAllUpgradeCatalogs() {
+    return allUpgradeCatalogs;
+  }
+
+  private String readSourceVersion() {
+    String sourceVersion = null;
+
+    MetainfoEntity metainfoEntity = metainfoDAO.findByKey
+      (Configuration.SERVER_VERSION_KEY);
+
+    if (metainfoEntity != null) {
+      String version = metainfoEntity.getMetainfoValue();
+      if (version != null) {
+        sourceVersion = VersionUtils.getVersionSubstring(version);
+      }
+    }
+
+    return sourceVersion;
+  }
+
+  /**
+   * Return the ordered list of upgrade catalogs to apply when upgrading
+   * from {@code sourceVersion} to {@code targetVersion}.
+   *
+   * @param sourceVersion current schema version, or null if unknown
+   * @param targetVersion version being upgraded to
+   * @return catalogs to apply, sorted by target version
+   * @throws org.apache.ambari.server.AmbariException
+   */
+  protected List<UpgradeCatalog> getUpgradePath(String sourceVersion,
+                                                       String targetVersion) throws AmbariException {
+
+    List<UpgradeCatalog> upgradeCatalogs = new ArrayList<UpgradeCatalog>();
+    List<UpgradeCatalog> candidateCatalogs = new ArrayList<UpgradeCatalog>(allUpgradeCatalogs);
+
+    Collections.sort(candidateCatalogs, new AbstractUpgradeCatalog.VersionComparator());
+
+    for (UpgradeCatalog upgradeCatalog : candidateCatalogs) {
+      if (sourceVersion == null || VersionUtils.compareVersions(sourceVersion,
+        upgradeCatalog.getTargetVersion(), 3) < 0) {
+        // catalog version is newer than source
+        if (VersionUtils.compareVersions(upgradeCatalog.getTargetVersion(),
+          targetVersion, 3) <= 0) {
+          // catalog version is older or equal to target
+          upgradeCatalogs.add(upgradeCatalog);
+        }
+      }
+    }
+
+    LOG.info("Upgrade path: " + upgradeCatalogs);
+
+    return upgradeCatalogs;
+  }
+
+  /**
+   * Extension of the main controller module that registers all known
+   * upgrade catalogs via a Guice multibinder.
+   */
+  public static class UpgradeHelperModule extends ControllerModule {
+    public UpgradeHelperModule() throws Exception {
+    }
+
+    @Override
+    protected void configure() {
+      super.configure();
+      // Add binding to each newly created catalog
+      Multibinder<UpgradeCatalog> catalogBinder =
+        Multibinder.newSetBinder(binder(), UpgradeCatalog.class);
+      catalogBinder.addBinding().to(UpgradeCatalog150.class);
+    }
+  }
+
+  private void executeUpgrade(List<UpgradeCatalog> upgradeCatalogs) throws AmbariException {
+    LOG.info("Executing DDL upgrade...");
+
+    if (upgradeCatalogs != null && !upgradeCatalogs.isEmpty()) {
+      for (UpgradeCatalog upgradeCatalog : upgradeCatalogs) {
+        try {
+          upgradeCatalog.upgradeSchema();
+        } catch (AmbariException e) {
+          LOG.error("Upgrade failed. ", e);
+          throw e;
+        } catch (SQLException e) {
+          LOG.error("Upgrade failed. ", e);
+          throw new AmbariException(e.getMessage());
+        }
+      }
+    }
+  }
+
+  private void executeDMLUpdates(List<UpgradeCatalog> upgradeCatalogs) throws AmbariException {
+    LOG.info("Execution DML changes.");
+
+    if (upgradeCatalogs != null && !upgradeCatalogs.isEmpty()) {
+      for (UpgradeCatalog upgradeCatalog : upgradeCatalogs) {
+        try {
+          upgradeCatalog.executeDMLUpdates();
+        } catch (AmbariException e) {
+          LOG.error("Upgrade failed. ", e);
+          throw e;
+        } catch (SQLException e) {
+          LOG.error("Upgrade failed. ", e);
+          throw new AmbariException(e.getMessage());
+        }
+      }
+    }
+  }
+
+  /**
+   * Upgrade Ambari DB schema to the target version passed in as the only
+   * argument.
+   * @param args args[0] = target version to upgrade to.
+   */
+  public static void main(String[] args) throws Exception {
+    if (args.length == 0) {
+      throw new InputMismatchException("Need to provide target version.");
+    }
+
+    String targetVersion = args[0];
+    LOG.info("Upgrading schema to target version = " + targetVersion);
+
+    Injector injector = Guice.createInjector(new UpgradeHelperModule());
+
+    SchemaUpgradeHelper schemaUpgradeHelper = injector.getInstance(SchemaUpgradeHelper.class);
+
+    // Catalogs register themselves on construction, so look the target up
+    // only after the injector has instantiated them.
+    UpgradeCatalog targetUpgradeCatalog = AbstractUpgradeCatalog
+      .getUpgradeCatalog(targetVersion);
+
+    LOG.debug("Target upgrade catalog: " + targetUpgradeCatalog);
+
+    // Read source version from DB
+    String sourceVersion = schemaUpgradeHelper.readSourceVersion();
+    LOG.info("Upgrading schema from source version = " + sourceVersion);
+
+    List<UpgradeCatalog> upgradeCatalogs =
+      schemaUpgradeHelper.getUpgradePath(sourceVersion, targetVersion);
+
+    schemaUpgradeHelper.executeUpgrade(upgradeCatalogs);
+
+    schemaUpgradeHelper.startPersistenceService();
+
+    schemaUpgradeHelper.executeDMLUpdates(upgradeCatalogs);
+
+    LOG.info("Upgrade successful.");
+
+    schemaUpgradeHelper.stopPersistenceService();
+  }
+}
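
As a worked example of the path selection above: with catalogs targeting
1.4.4, 1.5.0 and 1.6.0, a stored source version of 1.4.4 and a requested
target of 1.6.0, getUpgradePath returns [1.5.0, 1.6.0]; each selected
catalog is newer than the source and no newer than the target, and the
catalogs are applied in version order (all DDL first, then all DML).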