You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by sw...@apache.org on 2013/12/10 02:58:20 UTC
[1/3] git commit: AMBARI-4025. Smoke Tests for HBase fails on a 4
node cluster when smoke test is run on a non RS/Master host. (swagle)
Updated Branches:
refs/heads/trunk cc58951c1 -> 630983ab0
AMBARI-4025. Smoke Tests for HBase fails on a 4 node cluster when smoke test is run on a non RS/Master host. (swagle)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7ce91da3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7ce91da3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7ce91da3
Branch: refs/heads/trunk
Commit: 7ce91da37989374b78ed1e3ee0cd3d593a8742df
Parents: be6d0ef
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Mon Dec 9 17:30:28 2013 -0800
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Mon Dec 9 17:31:42 2013 -0800
----------------------------------------------------------------------
.../src/main/puppet/modules/hdp-hbase/manifests/client.pp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/7ce91da3/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
index 374b80b..b0931df 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
@@ -36,14 +36,14 @@ class hdp-hbase::client(
service_state => $service_state
}
- hdp::directory_recursive_create_ignore_failure { $hbase_tmp_dir:
+ hdp::directory_recursive_create_ignore_failure { "${hbase_tmp_dir}/local/jars":
owner => $hdp-hbase::params::hbase_user,
context_tag => 'hbase_client',
service_state => $service_state,
force => true
}
- Class[ 'hdp-hbase' ] -> Hdp::Directory_recursive_create_ignore_failure[ $hbase_tmp_dir ]
+ Class[ 'hdp-hbase' ] -> Hdp::Directory_recursive_create_ignore_failure<||>
}
} else {
hdp_fail("TODO not implemented yet: service_state = ${service_state}")
[2/3] git commit: AMBARI-4025. Smoke Tests for HBase fails on a 4
node cluster when smoke test is run on a non RS/Master host. (swagle)
Posted by sw...@apache.org.
AMBARI-4025. Smoke Tests for HBase fails on a 4 node cluster when smoke test is run on a non RS/Master host. (swagle)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/be6d0ef6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/be6d0ef6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/be6d0ef6
Branch: refs/heads/trunk
Commit: be6d0ef688527a7c582d24a562818513f187ff93
Parents: cc58951
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Mon Dec 9 17:28:23 2013 -0800
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Mon Dec 9 17:31:42 2013 -0800
----------------------------------------------------------------------
.../main/puppet/modules/hdp-hbase/manifests/client.pp | 12 ++++++++++++
1 file changed, 12 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/be6d0ef6/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
index 69c4bb8..374b80b 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
@@ -23,6 +23,9 @@ class hdp-hbase::client(
$opts = {}
)
{
+ include hdp-hbase::params
+ $hbase_tmp_dir = $hdp-hbase::params::hbase_tmp_dir
+
#assumption is there are no other hbase components on node
if ($service_state == 'no_op') {
} elsif ($service_state in ['installed_and_configured','uninstalled']) {
@@ -32,6 +35,15 @@ class hdp-hbase::client(
type => 'client',
service_state => $service_state
}
+
+ hdp::directory_recursive_create_ignore_failure { $hbase_tmp_dir:
+ owner => $hdp-hbase::params::hbase_user,
+ context_tag => 'hbase_client',
+ service_state => $service_state,
+ force => true
+ }
+
+ Class[ 'hdp-hbase' ] -> Hdp::Directory_recursive_create_ignore_failure[ $hbase_tmp_dir ]
}
} else {
hdp_fail("TODO not implemented yet: service_state = ${service_state}")
[3/3] git commit: AMBARI-4027. Define RequestSchedule DAO and
relational schema. (swagle)
Posted by sw...@apache.org.
AMBARI-4027. Define RequestSchedule DAO and relational schema. (swagle)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/630983ab
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/630983ab
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/630983ab
Branch: refs/heads/trunk
Commit: 630983ab01e327a4a19c2f823f66ec98bbf47d2c
Parents: 7ce91da
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Mon Dec 9 17:48:47 2013 -0800
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Mon Dec 9 17:48:47 2013 -0800
----------------------------------------------------------------------
.../orm/dao/RequestScheduleBatchHostDAO.java | 88 +++++
.../server/orm/dao/RequestScheduleDAO.java | 88 +++++
.../server/orm/entities/ClusterEntity.java | 11 +
.../RequestScheduleBatchHostEntity.java | 133 +++++++
.../RequestScheduleBatchHostEntityPK.java | 81 +++++
.../orm/entities/RequestScheduleEntity.java | 364 +++++++++++++++++++
.../main/resources/Ambari-DDL-MySQL-CREATE.sql | 178 +++++++++
.../main/resources/Ambari-DDL-Oracle-CREATE.sql | 192 +++++++++-
.../resources/Ambari-DDL-Postgres-CREATE.sql | 198 +++++++++-
.../Ambari-DDL-Postgres-REMOTE-CREATE.sql | 188 ++++++++++
.../src/main/resources/META-INF/persistence.xml | 2 +
.../server/orm/dao/RequestScheduleDAOTest.java | 140 +++++++
12 files changed, 1659 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/630983ab/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleBatchHostDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleBatchHostDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleBatchHostDAO.java
new file mode 100644
index 0000000..feabf9d
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleBatchHostDAO.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm.dao;
+
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.google.inject.Singleton;
+import com.google.inject.persist.Transactional;
+import org.apache.ambari.server.orm.entities.RequestScheduleBatchHostEntity;
+import org.apache.ambari.server.orm.entities.RequestScheduleBatchHostEntityPK;
+import javax.persistence.EntityManager;
+import javax.persistence.NoResultException;
+import javax.persistence.TypedQuery;
+import java.util.List;
+
+@Singleton
+public class RequestScheduleBatchHostDAO {
+ @Inject
+ Provider<EntityManager> entityManagerProvider;
+ @Inject
+ DaoUtils daoUtils;
+
+ @Transactional
+ public RequestScheduleBatchHostEntity findByPK
+ (RequestScheduleBatchHostEntityPK requestScheduleBatchHostEntityPK) {
+ return entityManagerProvider.get().find(RequestScheduleBatchHostEntity
+ .class, requestScheduleBatchHostEntityPK);
+ }
+
+ @Transactional
+ public List<RequestScheduleBatchHostEntity> findBySchedule(Long scheduleId) {
+ TypedQuery<RequestScheduleBatchHostEntity> query = entityManagerProvider
+ .get().createNamedQuery("batchHostsBySchedule",
+ RequestScheduleBatchHostEntity.class);
+
+ query.setParameter("id", scheduleId);
+ try {
+ return query.getResultList();
+ } catch (NoResultException ignored) {
+ }
+ return null;
+ }
+
+ @Transactional
+ public void create(RequestScheduleBatchHostEntity batchHostEntity) {
+ entityManagerProvider.get().persist(batchHostEntity);
+ }
+
+ @Transactional
+ public RequestScheduleBatchHostEntity merge(RequestScheduleBatchHostEntity batchHostEntity) {
+ return entityManagerProvider.get().merge(batchHostEntity);
+ }
+
+ @Transactional
+ public void refresh(RequestScheduleBatchHostEntity batchHostEntity) {
+ entityManagerProvider.get().refresh(batchHostEntity);
+ }
+
+ @Transactional
+ public void remove(RequestScheduleBatchHostEntity batchHostEntity) {
+ entityManagerProvider.get().remove(batchHostEntity);
+ }
+
+ @Transactional
+ public void removeBySchedule(Long scheduleId) {
+ TypedQuery<Long> query = entityManagerProvider.get().createQuery(
+ "DELETE FROM RequestScheduleBatchHostEntity batchHosts WHERE " +
+ "batchHosts.scheduleId = ?1", Long.class);
+
+ daoUtils.executeUpdate(query, scheduleId);
+ entityManagerProvider.get().flush();
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/630983ab/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleDAO.java
new file mode 100644
index 0000000..0c9b75e
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleDAO.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm.dao;
+
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.google.inject.Singleton;
+import com.google.inject.persist.Transactional;
+import org.apache.ambari.server.orm.entities.RequestScheduleEntity;
+import javax.persistence.EntityManager;
+import javax.persistence.NoResultException;
+import javax.persistence.TypedQuery;
+import java.util.List;
+
+@Singleton
+public class RequestScheduleDAO {
+ @Inject
+ Provider<EntityManager> entityManagerProvider;
+
+ @Transactional
+ public RequestScheduleEntity findById(Long id) {
+ return entityManagerProvider.get().find(RequestScheduleEntity.class, id);
+ }
+
+ @Transactional
+ public List<RequestScheduleEntity> findByStatus(String status) {
+ TypedQuery<RequestScheduleEntity> query = entityManagerProvider.get()
+ .createNamedQuery("reqScheduleByStatus", RequestScheduleEntity.class);
+ query.setParameter("status", status);
+ try {
+ return query.getResultList();
+ } catch (NoResultException ignored) {
+ return null;
+ }
+ }
+
+ @Transactional
+ public List<RequestScheduleEntity> findAll() {
+ TypedQuery<RequestScheduleEntity> query = entityManagerProvider.get()
+ .createNamedQuery("allReqSchedules", RequestScheduleEntity.class);
+
+ try {
+ return query.getResultList();
+ } catch (NoResultException ignored) {
+ return null;
+ }
+ }
+
+ @Transactional
+ public void create(RequestScheduleEntity requestScheduleEntity) {
+ entityManagerProvider.get().persist(requestScheduleEntity);
+ }
+
+ @Transactional
+ public RequestScheduleEntity merge(RequestScheduleEntity requestScheduleEntity) {
+ return entityManagerProvider.get().merge(requestScheduleEntity);
+ }
+
+ @Transactional
+ public void remove(RequestScheduleEntity requestScheduleEntity) {
+ entityManagerProvider.get().remove(requestScheduleEntity);
+ }
+
+ @Transactional
+ public void removeByPK(Long id) {
+ entityManagerProvider.get().remove(findById(id));
+ }
+
+ @Transactional
+ public void refresh(RequestScheduleEntity requestScheduleEntity) {
+ entityManagerProvider.get().refresh(requestScheduleEntity);
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/630983ab/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterEntity.java
index 0ea5614..55f7ee7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterEntity.java
@@ -85,6 +85,9 @@ public class ClusterEntity {
@OneToMany(mappedBy = "clusterEntity", cascade = CascadeType.ALL)
private Collection<ConfigGroupEntity> configGroupEntities;
+ @OneToMany(mappedBy = "clusterEntity", cascade = CascadeType.ALL)
+ private Collection<RequestScheduleEntity> requestScheduleEntities;
+
public Long getClusterId() {
return clusterId;
}
@@ -208,4 +211,12 @@ public class ClusterEntity {
public void setConfigGroupEntities(Collection<ConfigGroupEntity> configGroupEntities) {
this.configGroupEntities = configGroupEntities;
}
+
+ public Collection<RequestScheduleEntity> getRequestScheduleEntities() {
+ return requestScheduleEntities;
+ }
+
+ public void setRequestScheduleEntities(Collection<RequestScheduleEntity> requestScheduleEntities) {
+ this.requestScheduleEntities = requestScheduleEntities;
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/630983ab/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleBatchHostEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleBatchHostEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleBatchHostEntity.java
new file mode 100644
index 0000000..5942cf5
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleBatchHostEntity.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm.entities;
+
+import javax.persistence.Column;
+import javax.persistence.Entity;
+import javax.persistence.Id;
+import javax.persistence.JoinColumn;
+import javax.persistence.JoinColumns;
+import javax.persistence.ManyToOne;
+import javax.persistence.NamedQueries;
+import javax.persistence.NamedQuery;
+import javax.persistence.Table;
+
+@Table(name = "requestschedulebatchhost")
+@Entity
+@NamedQueries({
+ @NamedQuery(name = "batchHostsBySchedule", query =
+ "SELECT batchHost FROM RequestScheduleBatchHostEntity batchHost " +
+ "WHERE batchHost.scheduleId=:id")
+})
+public class RequestScheduleBatchHostEntity {
+
+ @Id
+ @Column(name = "schedule_id", nullable = false, insertable = true, updatable = true)
+ private Long scheduleId;
+
+ @Id
+ @Column(name = "batch_id", nullable = false, insertable = true, updatable = true)
+ private Long batchId;
+
+ @Id
+ @Column(name = "host_name", nullable = false, insertable = true, updatable = true)
+ private String hostName;
+
+ @Column(name = "batch_name")
+ private String batchName;
+
+ @ManyToOne
+ @JoinColumns({
+ @JoinColumn(name = "host_name", referencedColumnName = "host_name", nullable = false, insertable = false, updatable = false) })
+ private HostEntity hostEntity;
+
+ @ManyToOne
+ @JoinColumns({
+ @JoinColumn(name = "schedule_id", referencedColumnName = "schedule_id", nullable = false, insertable = false, updatable = false) })
+ private RequestScheduleEntity requestScheduleEntity;
+
+ public Long getScheduleId() {
+ return scheduleId;
+ }
+
+ public void setScheduleId(Long scheduleId) {
+ this.scheduleId = scheduleId;
+ }
+
+ public Long getBatchId() {
+ return batchId;
+ }
+
+ public void setBatchId(Long batchId) {
+ this.batchId = batchId;
+ }
+
+ public String getHostName() {
+ return hostName;
+ }
+
+ public void setHostName(String hostName) {
+ this.hostName = hostName;
+ }
+
+ public String getBatchName() {
+ return batchName;
+ }
+
+ public void setBatchName(String batchName) {
+ this.batchName = batchName;
+ }
+
+ public HostEntity getHostEntity() {
+ return hostEntity;
+ }
+
+ public void setHostEntity(HostEntity hostEntity) {
+ this.hostEntity = hostEntity;
+ }
+
+ public RequestScheduleEntity getRequestScheduleEntity() {
+ return requestScheduleEntity;
+ }
+
+ public void setRequestScheduleEntity(RequestScheduleEntity requestScheduleEntity) {
+ this.requestScheduleEntity = requestScheduleEntity;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ RequestScheduleBatchHostEntity that = (RequestScheduleBatchHostEntity) o;
+
+ if (!batchId.equals(that.batchId)) return false;
+ if (!hostName.equals(that.hostName)) return false;
+ if (!scheduleId.equals(that.scheduleId)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = scheduleId.hashCode();
+ result = 31 * result + batchId.hashCode();
+ result = 31 * result + hostName.hashCode();
+ return result;
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/630983ab/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleBatchHostEntityPK.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleBatchHostEntityPK.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleBatchHostEntityPK.java
new file mode 100644
index 0000000..01d576d
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleBatchHostEntityPK.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm.entities;
+
+
+import javax.persistence.Column;
+import javax.persistence.Id;
+import java.io.Serializable;
+
+public class RequestScheduleBatchHostEntityPK implements Serializable {
+ private Long scheduleId;
+ private Long batchId;
+ private String hostName;
+
+ @Id
+ @Column(name = "schedule_id", nullable = false, insertable = true, updatable = true)
+ public Long getScheduleId() {
+ return scheduleId;
+ }
+
+ public void setScheduleId(Long scheduleId) {
+ this.scheduleId = scheduleId;
+ }
+
+ @Id
+ @Column(name = "batch_id", nullable = false, insertable = true, updatable = true)
+ public Long getBatchId() {
+ return batchId;
+ }
+
+ public void setBatchId(Long batchId) {
+ this.batchId = batchId;
+ }
+
+ @Id
+ @Column(name = "host_name", nullable = false, insertable = true, updatable = true)
+ public String getHostName() {
+ return hostName;
+ }
+
+ public void setHostName(String hostName) {
+ this.hostName = hostName;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ RequestScheduleBatchHostEntityPK that = (RequestScheduleBatchHostEntityPK) o;
+
+ if (!batchId.equals(that.batchId)) return false;
+ if (!hostName.equals(that.hostName)) return false;
+ if (!scheduleId.equals(that.scheduleId)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = scheduleId.hashCode();
+ result = 31 * result + batchId.hashCode();
+ result = 31 * result + hostName.hashCode();
+ return result;
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/630983ab/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleEntity.java
new file mode 100644
index 0000000..a54c22c
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleEntity.java
@@ -0,0 +1,364 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm.entities;
+
+import javax.persistence.CascadeType;
+import javax.persistence.Column;
+import javax.persistence.Entity;
+import javax.persistence.GeneratedValue;
+import javax.persistence.GenerationType;
+import javax.persistence.Id;
+import javax.persistence.JoinColumn;
+import javax.persistence.ManyToOne;
+import javax.persistence.NamedQueries;
+import javax.persistence.NamedQuery;
+import javax.persistence.OneToMany;
+import javax.persistence.Table;
+import java.util.Collection;
+
+@Table(name = "requestschedule")
+@Entity
+@NamedQueries({
+ @NamedQuery(name = "allReqSchedules", query =
+ "SELECT reqSchedule FROM RequestScheduleEntity reqSchedule"),
+ @NamedQuery(name = "reqScheduleByStatus", query =
+ "SELECT reqSchedule FROM RequestScheduleEntity reqSchedule " +
+ "WHERE reqSchedule.status=:status")
+})
+public class RequestScheduleEntity {
+
+ @Id
+ @Column(name = "schedule_id", nullable = false, insertable = true, updatable = true)
+ @GeneratedValue(strategy = GenerationType.TABLE, generator = "schedule_id_generator")
+ private long scheduleId;
+
+ @Column(name = "cluster_id", insertable = false, updatable = false, nullable = false)
+ private Long clusterId;
+
+ @Column(name = "request_context")
+ private String requestContext;
+
+ @Column(name = "status")
+ private String status;
+
+ @Column(name = "target_type")
+ private String targetType;
+
+ @Column(name = "target_name")
+ private String targetName;
+
+ @Column(name = "target_service")
+ private String targetService;
+
+ @Column(name = "target_component")
+ private String targetComponent;
+
+ @Column(name = "batch_requests_by_host")
+ private boolean batchRequestByHost;
+
+ @Column(name = "batch_host_count")
+ private Integer batchHostCount;
+
+ @Column(name = "batch_separation_minutes")
+ private Integer batchSeparationInMinutes;
+
+ @Column(name = "batch_toleration_limit")
+ private Integer batchTolerationLimit;
+
+ @Column(name = "create_user")
+ private String createUser;
+
+ @Column(name = "create_timestamp")
+ private Long createTimestamp;
+
+ @Column(name = "update_user")
+ protected String updateUser;
+
+ @Column(name = "update_timestamp")
+ private Long updateTimestamp;
+
+ @Column(name = "minutes")
+ private String minutes;
+
+ @Column(name = "hours")
+ private String hours;
+
+ @Column(name = "days_of_month")
+ private String daysOfMonth;
+
+ @Column(name = "month")
+ private String month;
+
+ @Column(name = "day_of_week")
+ private String dayOfWeek;
+
+ @Column(name = "yearToSchedule")
+ private String year;
+
+ @Column(name = "starttime")
+ private Long startTime;
+
+ @Column(name = "endtime")
+ private Long endTime;
+
+ @Column(name = "last_execution_status")
+ private String lastExecutionStatus;
+
+ @ManyToOne
+ @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false)
+ private ClusterEntity clusterEntity;
+
+ @OneToMany(mappedBy = "requestScheduleEntity", cascade = CascadeType.ALL)
+ private Collection<RequestScheduleBatchHostEntity> requestScheduleBatchHostEntities;
+
+ public long getScheduleId() {
+ return scheduleId;
+ }
+
+ public void setScheduleId(long scheduleId) {
+ this.scheduleId = scheduleId;
+ }
+
+ public Long getClusterId() {
+ return clusterId;
+ }
+
+ public void setClusterId(Long clusterId) {
+ this.clusterId = clusterId;
+ }
+
+ public String getRequestContext() {
+ return requestContext;
+ }
+
+ public void setRequestContext(String request_context) {
+ this.requestContext = request_context;
+ }
+
+ public String getStatus() {
+ return status;
+ }
+
+ public void setStatus(String status) {
+ this.status = status;
+ }
+
+ public String getTargetType() {
+ return targetType;
+ }
+
+ public void setTargetType(String targetType) {
+ this.targetType = targetType;
+ }
+
+ public String getTargetName() {
+ return targetName;
+ }
+
+ public void setTargetName(String targetName) {
+ this.targetName = targetName;
+ }
+
+ public String getTargetService() {
+ return targetService;
+ }
+
+ public void setTargetService(String targetService) {
+ this.targetService = targetService;
+ }
+
+ public String getTargetComponent() {
+ return targetComponent;
+ }
+
+ public void setTargetComponent(String targetComponent) {
+ this.targetComponent = targetComponent;
+ }
+
+ public boolean getIsBatchRequestByHost() {
+ return batchRequestByHost;
+ }
+
+ public void setBatchRequestByHost(boolean batchRequestByHost) {
+ this.batchRequestByHost = batchRequestByHost;
+ }
+
+ public Integer getBatchHostCount() {
+ return batchHostCount;
+ }
+
+ public void setBatchHostCount(Integer batchHostCount) {
+ this.batchHostCount = batchHostCount;
+ }
+
+ public Integer getBatchSeparationInMinutes() {
+ return batchSeparationInMinutes;
+ }
+
+ public void setBatchSeparationInMinutes(Integer batchSeparationInMinutes) {
+ this.batchSeparationInMinutes = batchSeparationInMinutes;
+ }
+
+ public Integer getBatchTolerationLimit() {
+ return batchTolerationLimit;
+ }
+
+ public void setBatchTolerationLimit(Integer batchTolerationLimit) {
+ this.batchTolerationLimit = batchTolerationLimit;
+ }
+
+ public String getCreateUser() {
+ return createUser;
+ }
+
+ public void setCreateUser(String createUser) {
+ this.createUser = createUser;
+ }
+
+ public Long getCreateTimestamp() {
+ return createTimestamp;
+ }
+
+ public void setCreateTimestamp(Long createTimestamp) {
+ this.createTimestamp = createTimestamp;
+ }
+
+ public String getUpdateUser() {
+ return updateUser;
+ }
+
+ public void setUpdateUser(String updateUser) {
+ this.updateUser = updateUser;
+ }
+
+ public Long getUpdateTimestamp() {
+ return updateTimestamp;
+ }
+
+ public void setUpdateTimestamp(Long updateTimestamp) {
+ this.updateTimestamp = updateTimestamp;
+ }
+
+ public String getMinutes() {
+ return minutes;
+ }
+
+ public void setMinutes(String minutes) {
+ this.minutes = minutes;
+ }
+
+ public String getHours() {
+ return hours;
+ }
+
+ public void setHours(String hours) {
+ this.hours = hours;
+ }
+
+ public String getDaysOfMonth() {
+ return daysOfMonth;
+ }
+
+ public void setDaysOfMonth(String daysOfMonth) {
+ this.daysOfMonth = daysOfMonth;
+ }
+
+ public String getMonth() {
+ return month;
+ }
+
+ public void setMonth(String month) {
+ this.month = month;
+ }
+
+ public String getDayOfWeek() {
+ return dayOfWeek;
+ }
+
+ public void setDayOfWeek(String dayOfWeek) {
+ this.dayOfWeek = dayOfWeek;
+ }
+
+ public String getYear() {
+ return year;
+ }
+
+ public void setYear(String year) {
+ this.year = year;
+ }
+
+ public Long getStartTime() {
+ return startTime;
+ }
+
+ public void setStartTime(Long startTime) {
+ this.startTime = startTime;
+ }
+
+ public Long getEndTime() {
+ return endTime;
+ }
+
+ public void setEndTime(Long endTime) {
+ this.endTime = endTime;
+ }
+
+ public String getLastExecutionStatus() {
+ return lastExecutionStatus;
+ }
+
+ public void setLastExecutionStatus(String lastExecutionStatus) {
+ this.lastExecutionStatus = lastExecutionStatus;
+ }
+
+ public ClusterEntity getClusterEntity() {
+ return clusterEntity;
+ }
+
+ public void setClusterEntity(ClusterEntity clusterEntity) {
+ this.clusterEntity = clusterEntity;
+ }
+
+ public Collection<RequestScheduleBatchHostEntity> getRequestScheduleBatchHostEntities() {
+ return requestScheduleBatchHostEntities;
+ }
+
+ public void setRequestScheduleBatchHostEntities(Collection<RequestScheduleBatchHostEntity> requestScheduleBatchHostEntities) {
+ this.requestScheduleBatchHostEntities = requestScheduleBatchHostEntities;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ RequestScheduleEntity that = (RequestScheduleEntity) o;
+
+ if (scheduleId != that.scheduleId) return false;
+ if (!clusterId.equals(that.clusterId)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = (int) (scheduleId ^ (scheduleId >>> 32));
+ result = 31 * result + clusterId.hashCode();
+ return result;
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/630983ab/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
index 4accd0a..27c77e8 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
@@ -52,6 +52,8 @@ CREATE TABLE ambari_sequences (sequence_name VARCHAR(50) NOT NULL, value DECIMAL
CREATE TABLE confgroupclusterconfigmapping (config_group_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, config_type VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, user_name VARCHAR(255) DEFAULT '_db', create_timestamp BIGINT NOT NULL, PRIMARY KEY(config_group_id, cluster_id, config_type));
CREATE TABLE configgroup (group_id BIGINT, cluster_id BIGINT NOT NULL, group_name VARCHAR(255) NOT NULL, tag VARCHAR(1024) NOT NULL, description VARCHAR(1024), create_timestamp BIGINT NOT NULL, PRIMARY KEY(group_id));
CREATE TABLE configgrouphostmapping (config_group_id BIGINT NOT NULL, host_name VARCHAR(255) NOT NULL, PRIMARY KEY(config_group_id, host_name));
+CREATE TABLE requestschedule (schedule_id bigint, cluster_id BIGINT NOT NULL, request_context varchar(255), status varchar(255), target_type varchar(255), target_name varchar(255) NOT NULL, target_service varchar(255) NOT NULL, target_component varchar(255), batch_requests_by_host boolean, batch_host_count smallint, batch_separation_minutes smallint, batch_toleration_limit smallint, create_user varchar(255), create_timestamp bigint, update_user varchar(255), update_timestamp bigint, minutes varchar(10), hours varchar(10), days_of_month varchar(10), month varchar(10), day_of_week varchar(10), yearToSchedule varchar(10), startTime bigint, endTime bigint, last_execution_status varchar(255), PRIMARY KEY(schedule_id));
+CREATE TABLE requestschedulebatchhost (schedule_id bigint, batch_id bigint, host_name varchar(255), batch_name varchar(255), PRIMARY KEY(schedule_id, batch_id, host_name));
ALTER TABLE users ADD CONSTRAINT UNQ_users_0 UNIQUE (user_name, ldap_user);
@@ -80,11 +82,15 @@ ALTER TABLE confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclusterconf
ALTER TABLE configgroup ADD CONSTRAINT FK_configgroup_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
ALTER TABLE configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_configgroup_id FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id);
ALTER TABLE configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_schedule FOREIGN KEY (schedule_id) REFERENCES requestschedule (schedule_id);
INSERT INTO ambari_sequences(sequence_name, value) values ('cluster_id_seq', 0);
INSERT INTO ambari_sequences(sequence_name, value) values ('host_role_command_id_seq', 0);
INSERT INTO ambari_sequences(sequence_name, value) values ('user_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, value) values ('configgroup_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, value) values ('requestschedule_id_seq', 1);
insert into roles(role_name)
select 'admin'
@@ -176,6 +182,178 @@ CREATE TABLE clusterEvent (
host TEXT, rack TEXT
);
+-- org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
+
+DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
+DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
+DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
+DROP TABLE IF EXISTS QRTZ_LOCKS;
+DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
+DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
+DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
+DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
+DROP TABLE IF EXISTS QRTZ_TRIGGERS;
+DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
+DROP TABLE IF EXISTS QRTZ_CALENDARS;
+
+
+CREATE TABLE QRTZ_JOB_DETAILS
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ JOB_NAME VARCHAR(200) NOT NULL,
+ JOB_GROUP VARCHAR(200) NOT NULL,
+ DESCRIPTION VARCHAR(250) NULL,
+ JOB_CLASS_NAME VARCHAR(250) NOT NULL,
+ IS_DURABLE VARCHAR(1) NOT NULL,
+ IS_NONCONCURRENT VARCHAR(1) NOT NULL,
+ IS_UPDATE_DATA VARCHAR(1) NOT NULL,
+ REQUESTS_RECOVERY VARCHAR(1) NOT NULL,
+ JOB_DATA BLOB NULL,
+ PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
+);
+
+CREATE TABLE QRTZ_TRIGGERS
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ JOB_NAME VARCHAR(200) NOT NULL,
+ JOB_GROUP VARCHAR(200) NOT NULL,
+ DESCRIPTION VARCHAR(250) NULL,
+ NEXT_FIRE_TIME BIGINT(13) NULL,
+ PREV_FIRE_TIME BIGINT(13) NULL,
+ PRIORITY INTEGER NULL,
+ TRIGGER_STATE VARCHAR(16) NOT NULL,
+ TRIGGER_TYPE VARCHAR(8) NOT NULL,
+ START_TIME BIGINT(13) NOT NULL,
+ END_TIME BIGINT(13) NULL,
+ CALENDAR_NAME VARCHAR(200) NULL,
+ MISFIRE_INSTR SMALLINT(2) NULL,
+ JOB_DATA BLOB NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
+ REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP)
+);
+
+CREATE TABLE QRTZ_SIMPLE_TRIGGERS
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ REPEAT_COUNT BIGINT(7) NOT NULL,
+ REPEAT_INTERVAL BIGINT(12) NOT NULL,
+ TIMES_TRIGGERED BIGINT(10) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE QRTZ_CRON_TRIGGERS
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ CRON_EXPRESSION VARCHAR(200) NOT NULL,
+ TIME_ZONE_ID VARCHAR(80),
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE QRTZ_SIMPROP_TRIGGERS
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ STR_PROP_1 VARCHAR(512) NULL,
+ STR_PROP_2 VARCHAR(512) NULL,
+ STR_PROP_3 VARCHAR(512) NULL,
+ INT_PROP_1 INT NULL,
+ INT_PROP_2 INT NULL,
+ LONG_PROP_1 BIGINT NULL,
+ LONG_PROP_2 BIGINT NULL,
+ DEC_PROP_1 NUMERIC(13,4) NULL,
+ DEC_PROP_2 NUMERIC(13,4) NULL,
+ BOOL_PROP_1 VARCHAR(1) NULL,
+ BOOL_PROP_2 VARCHAR(1) NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE QRTZ_BLOB_TRIGGERS
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ BLOB_DATA BLOB NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE QRTZ_CALENDARS
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ CALENDAR_NAME VARCHAR(200) NOT NULL,
+ CALENDAR BLOB NOT NULL,
+ PRIMARY KEY (SCHED_NAME,CALENDAR_NAME)
+);
+
+CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE QRTZ_FIRED_TRIGGERS
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ ENTRY_ID VARCHAR(95) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ INSTANCE_NAME VARCHAR(200) NOT NULL,
+ FIRED_TIME BIGINT(13) NOT NULL,
+ SCHED_TIME BIGINT(13) NOT NULL,
+ PRIORITY INTEGER NOT NULL,
+ STATE VARCHAR(16) NOT NULL,
+ JOB_NAME VARCHAR(200) NULL,
+ JOB_GROUP VARCHAR(200) NULL,
+ IS_NONCONCURRENT VARCHAR(1) NULL,
+ REQUESTS_RECOVERY VARCHAR(1) NULL,
+ PRIMARY KEY (SCHED_NAME,ENTRY_ID)
+);
+
+CREATE TABLE QRTZ_SCHEDULER_STATE
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ INSTANCE_NAME VARCHAR(200) NOT NULL,
+ LAST_CHECKIN_TIME BIGINT(13) NOT NULL,
+ CHECKIN_INTERVAL BIGINT(13) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,INSTANCE_NAME)
+);
+
+CREATE TABLE QRTZ_LOCKS
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ LOCK_NAME VARCHAR(40) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,LOCK_NAME)
+);
+
+create index idx_qrtz_t_nf_st on qrtz_triggers(TRIGGER_STATE,NEXT_FIRE_TIME);
+create index idx_qrtz_ft_trig_name on qrtz_fired_triggers(TRIGGER_NAME);
+create index idx_qrtz_ft_trig_group on qrtz_fired_triggers(TRIGGER_GROUP);
+create index idx_qrtz_ft_trig_n_g on qrtz_fired_triggers(TRIGGER_NAME,TRIGGER_GROUP);
+create index idx_qrtz_ft_job_name on qrtz_fired_triggers(JOB_NAME);
+create index idx_qrtz_ft_job_group on qrtz_fired_triggers(JOB_GROUP);
+create index idx_qrtz_t_next_fire_time_misfire on qrtz_triggers(MISFIRE_INSTR,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nf_st_misfire on qrtz_triggers(MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
+create index idx_qrtz_t_nf_st_misfire_grp on qrtz_triggers(MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
+
+commit;
+
+
http://git-wip-us.apache.org/repos/asf/ambari/blob/630983ab/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
index fd0dec2..74a4607 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
@@ -43,7 +43,8 @@ CREATE TABLE configgroup (group_id NUMBER(19), cluster_id NUMBER(19) NOT NULL, g
CREATE TABLE confgroupclusterconfigmapping (config_group_id NUMBER(19) NOT NULL, cluster_id NUMBER(19) NOT NULL, config_type VARCHAR2(255) NOT NULL, version_tag VARCHAR2(255) NOT NULL, user_name VARCHAR2(255) DEFAULT '_db', create_timestamp NUMBER(19) NOT NULL, PRIMARY KEY(config_group_id, cluster_id, config_type));
CREATE TABLE configgrouphostmapping (config_group_id NUMBER(19) NOT NULL, host_name VARCHAR2(255) NOT NULL, PRIMARY KEY(config_group_id, host_name));
CREATE TABLE action (action_name VARCHAR2(255) NOT NULL, action_type VARCHAR2(255) NOT NULL, inputs VARCHAR2(1024), target_service VARCHAR2(255), target_component VARCHAR2(255), default_timeout NUMBER(10) NOT NULL, description VARCHAR2(1024), target_type VARCHAR2(255), PRIMARY KEY (action_name));
-
+CREATE TABLE requestschedule (schedule_id NUMBER(19), cluster_id NUMBER(19) NOT NULL, request_context VARCHAR2(255), status VARCHAR2(255), target_type VARCHAR2(255), target_name VARCHAR2(255) NOT NULL, target_service VARCHAR2(255) NOT NULL, target_component VARCHAR2(255), batch_requests_by_host char check (batch_requests_by_host in ('FALSE','TRUE')), batch_host_count smallint, batch_separation_minutes smallint, batch_toleration_limit smallint, create_user VARCHAR2(255), create_timestamp NUMBER(19), update_user VARCHAR2(255), update_timestamp NUMBER(19), minutes VARCHAR2(10), hours VARCHAR2(10), days_of_month VARCHAR2(10), month VARCHAR2(10), day_of_week VARCHAR2(10), yearToSchedule VARCHAR2(10), startTime NUMBER(19), endTime NUMBER(19), last_execution_status VARCHAR2(255), PRIMARY KEY(schedule_id));
+CREATE TABLE requestschedulebatchhost (schedule_id NUMBER(19), batch_id NUMBER(19), host_name VARCHAR2(255), batch_name VARCHAR2(255), PRIMARY KEY(schedule_id, batch_id, host_name));
ALTER TABLE users ADD CONSTRAINT UNQ_users_0 UNIQUE (user_name, ldap_user);
@@ -72,13 +73,15 @@ ALTER TABLE confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclusterconf
ALTER TABLE confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclusterconfigmapping_group_id FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id);
ALTER TABLE confgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_configgroup_id FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id);
ALTER TABLE confgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
-
+ALTER TABLE requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_schedule FOREIGN KEY (schedule_id) REFERENCES requestschedule (schedule_id);
INSERT INTO ambari_sequences(sequence_name, value) values ('host_role_command_id_seq', 0);
INSERT INTO ambari_sequences(sequence_name, value) values ('user_id_seq', 1);
INSERT INTO ambari_sequences(sequence_name, value) values ('cluster_id_seq', 0);
INSERT INTO ambari_sequences(sequence_name, value) values ('configgroup_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, value) values ('requestschedule_id_seq', 1);
INSERT INTO metainfo("metainfo_key", "metainfo_value") values ('version', '${ambariVersion}');
insert into Roles(role_name)
@@ -172,3 +175,188 @@ CREATE TABLE clusterEvent (
host VARCHAR2(4000), rack VARCHAR2(4000)
);
+-- Quartz tables
+
+delete from qrtz_fired_triggers;
+delete from qrtz_simple_triggers;
+delete from qrtz_simprop_triggers;
+delete from qrtz_cron_triggers;
+delete from qrtz_blob_triggers;
+delete from qrtz_triggers;
+delete from qrtz_job_details;
+delete from qrtz_calendars;
+delete from qrtz_paused_trigger_grps;
+delete from qrtz_locks;
+delete from qrtz_scheduler_state;
+
+drop table qrtz_calendars;
+drop table qrtz_fired_triggers;
+drop table qrtz_blob_triggers;
+drop table qrtz_cron_triggers;
+drop table qrtz_simple_triggers;
+drop table qrtz_simprop_triggers;
+drop table qrtz_triggers;
+drop table qrtz_job_details;
+drop table qrtz_paused_trigger_grps;
+drop table qrtz_locks;
+drop table qrtz_scheduler_state;
+
+
+CREATE TABLE qrtz_job_details
+ (
+ SCHED_NAME VARCHAR2(120) NOT NULL,
+ JOB_NAME VARCHAR2(200) NOT NULL,
+ JOB_GROUP VARCHAR2(200) NOT NULL,
+ DESCRIPTION VARCHAR2(250) NULL,
+ JOB_CLASS_NAME VARCHAR2(250) NOT NULL,
+ IS_DURABLE VARCHAR2(1) NOT NULL,
+ IS_NONCONCURRENT VARCHAR2(1) NOT NULL,
+ IS_UPDATE_DATA VARCHAR2(1) NOT NULL,
+ REQUESTS_RECOVERY VARCHAR2(1) NOT NULL,
+ JOB_DATA BLOB NULL,
+ CONSTRAINT QRTZ_JOB_DETAILS_PK PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
+);
+CREATE TABLE qrtz_triggers
+ (
+ SCHED_NAME VARCHAR2(120) NOT NULL,
+ TRIGGER_NAME VARCHAR2(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR2(200) NOT NULL,
+ JOB_NAME VARCHAR2(200) NOT NULL,
+ JOB_GROUP VARCHAR2(200) NOT NULL,
+ DESCRIPTION VARCHAR2(250) NULL,
+ NEXT_FIRE_TIME NUMBER(13) NULL,
+ PREV_FIRE_TIME NUMBER(13) NULL,
+ PRIORITY NUMBER(13) NULL,
+ TRIGGER_STATE VARCHAR2(16) NOT NULL,
+ TRIGGER_TYPE VARCHAR2(8) NOT NULL,
+ START_TIME NUMBER(13) NOT NULL,
+ END_TIME NUMBER(13) NULL,
+ CALENDAR_NAME VARCHAR2(200) NULL,
+ MISFIRE_INSTR NUMBER(2) NULL,
+ JOB_DATA BLOB NULL,
+ CONSTRAINT QRTZ_TRIGGERS_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ CONSTRAINT QRTZ_TRIGGER_TO_JOBS_FK FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
+ REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP)
+);
+CREATE TABLE qrtz_simple_triggers
+ (
+ SCHED_NAME VARCHAR2(120) NOT NULL,
+ TRIGGER_NAME VARCHAR2(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR2(200) NOT NULL,
+ REPEAT_COUNT NUMBER(7) NOT NULL,
+ REPEAT_INTERVAL NUMBER(12) NOT NULL,
+ TIMES_TRIGGERED NUMBER(10) NOT NULL,
+ CONSTRAINT QRTZ_SIMPLE_TRIG_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ CONSTRAINT QRTZ_SIMPLE_TRIG_TO_TRIG_FK FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+CREATE TABLE qrtz_cron_triggers
+ (
+ SCHED_NAME VARCHAR2(120) NOT NULL,
+ TRIGGER_NAME VARCHAR2(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR2(200) NOT NULL,
+ CRON_EXPRESSION VARCHAR2(120) NOT NULL,
+ TIME_ZONE_ID VARCHAR2(80),
+ CONSTRAINT QRTZ_CRON_TRIG_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ CONSTRAINT QRTZ_CRON_TRIG_TO_TRIG_FK FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+CREATE TABLE qrtz_simprop_triggers
+ (
+ SCHED_NAME VARCHAR2(120) NOT NULL,
+ TRIGGER_NAME VARCHAR2(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR2(200) NOT NULL,
+ STR_PROP_1 VARCHAR2(512) NULL,
+ STR_PROP_2 VARCHAR2(512) NULL,
+ STR_PROP_3 VARCHAR2(512) NULL,
+ INT_PROP_1 NUMBER(10) NULL,
+ INT_PROP_2 NUMBER(10) NULL,
+ LONG_PROP_1 NUMBER(13) NULL,
+ LONG_PROP_2 NUMBER(13) NULL,
+ DEC_PROP_1 NUMERIC(13,4) NULL,
+ DEC_PROP_2 NUMERIC(13,4) NULL,
+ BOOL_PROP_1 VARCHAR2(1) NULL,
+ BOOL_PROP_2 VARCHAR2(1) NULL,
+ CONSTRAINT QRTZ_SIMPROP_TRIG_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ CONSTRAINT QRTZ_SIMPROP_TRIG_TO_TRIG_FK FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+CREATE TABLE qrtz_blob_triggers
+ (
+ SCHED_NAME VARCHAR2(120) NOT NULL,
+ TRIGGER_NAME VARCHAR2(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR2(200) NOT NULL,
+ BLOB_DATA BLOB NULL,
+ CONSTRAINT QRTZ_BLOB_TRIG_PK PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ CONSTRAINT QRTZ_BLOB_TRIG_TO_TRIG_FK FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+CREATE TABLE qrtz_calendars
+ (
+ SCHED_NAME VARCHAR2(120) NOT NULL,
+ CALENDAR_NAME VARCHAR2(200) NOT NULL,
+ CALENDAR BLOB NOT NULL,
+ CONSTRAINT QRTZ_CALENDARS_PK PRIMARY KEY (SCHED_NAME,CALENDAR_NAME)
+);
+CREATE TABLE qrtz_paused_trigger_grps
+ (
+ SCHED_NAME VARCHAR2(120) NOT NULL,
+ TRIGGER_GROUP VARCHAR2(200) NOT NULL,
+ CONSTRAINT QRTZ_PAUSED_TRIG_GRPS_PK PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP)
+);
+CREATE TABLE qrtz_fired_triggers
+ (
+ SCHED_NAME VARCHAR2(120) NOT NULL,
+ ENTRY_ID VARCHAR2(95) NOT NULL,
+ TRIGGER_NAME VARCHAR2(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR2(200) NOT NULL,
+ INSTANCE_NAME VARCHAR2(200) NOT NULL,
+ FIRED_TIME NUMBER(13) NOT NULL,
+ SCHED_TIME NUMBER(13) NOT NULL,
+ PRIORITY NUMBER(13) NOT NULL,
+ STATE VARCHAR2(16) NOT NULL,
+ JOB_NAME VARCHAR2(200) NULL,
+ JOB_GROUP VARCHAR2(200) NULL,
+ IS_NONCONCURRENT VARCHAR2(1) NULL,
+ REQUESTS_RECOVERY VARCHAR2(1) NULL,
+ CONSTRAINT QRTZ_FIRED_TRIGGER_PK PRIMARY KEY (SCHED_NAME,ENTRY_ID)
+);
+CREATE TABLE qrtz_scheduler_state
+ (
+ SCHED_NAME VARCHAR2(120) NOT NULL,
+ INSTANCE_NAME VARCHAR2(200) NOT NULL,
+ LAST_CHECKIN_TIME NUMBER(13) NOT NULL,
+ CHECKIN_INTERVAL NUMBER(13) NOT NULL,
+ CONSTRAINT QRTZ_SCHEDULER_STATE_PK PRIMARY KEY (SCHED_NAME,INSTANCE_NAME)
+);
+CREATE TABLE qrtz_locks
+ (
+ SCHED_NAME VARCHAR2(120) NOT NULL,
+ LOCK_NAME VARCHAR2(40) NOT NULL,
+ CONSTRAINT QRTZ_LOCKS_PK PRIMARY KEY (SCHED_NAME,LOCK_NAME)
+);
+
+create index idx_qrtz_j_req_recovery on qrtz_job_details(SCHED_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_j_grp on qrtz_job_details(SCHED_NAME,JOB_GROUP);
+
+create index idx_qrtz_t_j on qrtz_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_t_jg on qrtz_triggers(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_t_c on qrtz_triggers(SCHED_NAME,CALENDAR_NAME);
+create index idx_qrtz_t_g on qrtz_triggers(SCHED_NAME,TRIGGER_GROUP);
+create index idx_qrtz_t_state on qrtz_triggers(SCHED_NAME,TRIGGER_STATE);
+create index idx_qrtz_t_n_state on qrtz_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_n_g_state on qrtz_triggers(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_next_fire_time on qrtz_triggers(SCHED_NAME,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st on qrtz_triggers(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_misfire on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st_misfire on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
+create index idx_qrtz_t_nft_st_misfire_grp on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
+
+create index idx_qrtz_ft_trig_inst_name on qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME);
+create index idx_qrtz_ft_inst_job_req_rcvry on qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_ft_j_g on qrtz_fired_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_ft_jg on qrtz_fired_triggers(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_ft_t_g on qrtz_fired_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
+create index idx_qrtz_ft_tg on qrtz_fired_triggers(SCHED_NAME,TRIGGER_GROUP);
+
+
http://git-wip-us.apache.org/repos/asf/ambari/blob/630983ab/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
index 8ffb047..a05169a 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
@@ -107,10 +107,15 @@ GRANT ALL PRIVILEGES ON TABLE ambari.confgroupclusterconfigmapping TO :username;
CREATE TABLE ambari.configgrouphostmapping (config_group_id BIGINT NOT NULL, host_name VARCHAR(255) NOT NULL, PRIMARY KEY(config_group_id, host_name));
GRANT ALL PRIVILEGES ON TABLE ambari.configgrouphostmapping TO :username;
-CREATE TABLE ambari.action (action_name VARCHAR(255) NOT NULL, action_type VARCHAR(32) NOT NULL, inputs VARCHAR(1000),
-target_service VARCHAR(255), target_component VARCHAR(255), default_timeout SMALLINT NOT NULL, description VARCHAR(1000), target_type VARCHAR(32), PRIMARY KEY (action_name));
+CREATE TABLE ambari.action (action_name VARCHAR(255) NOT NULL, action_type VARCHAR(32) NOT NULL, inputs VARCHAR(1000), target_service VARCHAR(255), target_component VARCHAR(255), default_timeout SMALLINT NOT NULL, description VARCHAR(1000), target_type VARCHAR(32), PRIMARY KEY (action_name));
GRANT ALL PRIVILEGES ON TABLE ambari.action TO :username;
+CREATE TABLE ambari.requestschedule (schedule_id bigint, cluster_id BIGINT NOT NULL, request_context varchar(255), status varchar(255), target_type varchar(255), target_name varchar(255) NOT NULL, target_service varchar(255) NOT NULL, target_component varchar(255), batch_requests_by_host boolean, batch_host_count smallint, batch_separation_minutes smallint, batch_toleration_limit smallint, create_user varchar(255), create_timestamp bigint, update_user varchar(255), update_timestamp bigint, minutes varchar(10), hours varchar(10), days_of_month varchar(10), month varchar(10), day_of_week varchar(10), yearToSchedule varchar(10), startTime bigint, endTime bigint, last_execution_status varchar(255), PRIMARY KEY(schedule_id));
+GRANT ALL PRIVILEGES ON TABLE ambari.requestschedule TO :username;
+
+CREATE TABLE ambari.requestschedulebatchhost (schedule_id bigint, batch_id bigint, host_name varchar(255), batch_name varchar(255), PRIMARY KEY(schedule_id, batch_id, host_name));
+GRANT ALL PRIVILEGES ON TABLE ambari.requestschedulebatchhost TO :username;
+
--------altering tables by creating foreign keys----------
ALTER TABLE ambari.clusterconfig ADD CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
ALTER TABLE ambari.clusterservices ADD CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
@@ -139,6 +144,8 @@ ALTER TABLE ambari.confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclus
ALTER TABLE ambari.confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclusterconfigmapping_group_id FOREIGN KEY (config_group_id) REFERENCES ambari.configgroup (group_id);
ALTER TABLE ambari.configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_configgroup_id FOREIGN KEY (config_group_id) REFERENCES ambari.configgroup (group_id);
ALTER TABLE ambari.configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
+ALTER TABLE ambari.requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
+ALTER TABLE ambari.requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_schedule FOREIGN KEY (schedule_id) REFERENCES ambari.requestschedule (schedule_id);
---------inserting some data-----------
BEGIN;
@@ -150,6 +157,8 @@ BEGIN;
SELECT 'host_role_command_id_seq', 1
union all
-select 'configgroup_id_seq', 1;
+select 'configgroup_id_seq', 1
+ union all
+ select 'requestschedule_id_seq', 1;
INSERT INTO ambari.Roles (role_name)
SELECT 'admin'
@@ -260,3 +269,188 @@ CREATE TABLE clusterEvent (
host TEXT, rack TEXT
);
GRANT ALL PRIVILEGES ON TABLE clusterEvent TO "mapred";
+
+-- Quartz tables
+
+drop table qrtz_fired_triggers;
+DROP TABLE QRTZ_PAUSED_TRIGGER_GRPS;
+DROP TABLE QRTZ_SCHEDULER_STATE;
+DROP TABLE QRTZ_LOCKS;
+drop table qrtz_simple_triggers;
+drop table qrtz_cron_triggers;
+drop table qrtz_simprop_triggers;
+DROP TABLE QRTZ_BLOB_TRIGGERS;
+drop table qrtz_triggers;
+drop table qrtz_job_details;
+drop table qrtz_calendars;
+
+CREATE TABLE qrtz_job_details
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ JOB_NAME VARCHAR(200) NOT NULL,
+ JOB_GROUP VARCHAR(200) NOT NULL,
+ DESCRIPTION VARCHAR(250) NULL,
+ JOB_CLASS_NAME VARCHAR(250) NOT NULL,
+ IS_DURABLE BOOL NOT NULL,
+ IS_NONCONCURRENT BOOL NOT NULL,
+ IS_UPDATE_DATA BOOL NOT NULL,
+ REQUESTS_RECOVERY BOOL NOT NULL,
+ JOB_DATA BYTEA NULL,
+ PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
+);
+
+CREATE TABLE qrtz_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ JOB_NAME VARCHAR(200) NOT NULL,
+ JOB_GROUP VARCHAR(200) NOT NULL,
+ DESCRIPTION VARCHAR(250) NULL,
+ NEXT_FIRE_TIME BIGINT NULL,
+ PREV_FIRE_TIME BIGINT NULL,
+ PRIORITY INTEGER NULL,
+ TRIGGER_STATE VARCHAR(16) NOT NULL,
+ TRIGGER_TYPE VARCHAR(8) NOT NULL,
+ START_TIME BIGINT NOT NULL,
+ END_TIME BIGINT NULL,
+ CALENDAR_NAME VARCHAR(200) NULL,
+ MISFIRE_INSTR SMALLINT NULL,
+ JOB_DATA BYTEA NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
+ REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP)
+);
+
+CREATE TABLE qrtz_simple_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ REPEAT_COUNT BIGINT NOT NULL,
+ REPEAT_INTERVAL BIGINT NOT NULL,
+ TIMES_TRIGGERED BIGINT NOT NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_cron_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ CRON_EXPRESSION VARCHAR(120) NOT NULL,
+ TIME_ZONE_ID VARCHAR(80),
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_simprop_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ STR_PROP_1 VARCHAR(512) NULL,
+ STR_PROP_2 VARCHAR(512) NULL,
+ STR_PROP_3 VARCHAR(512) NULL,
+ INT_PROP_1 INT NULL,
+ INT_PROP_2 INT NULL,
+ LONG_PROP_1 BIGINT NULL,
+ LONG_PROP_2 BIGINT NULL,
+ DEC_PROP_1 NUMERIC(13,4) NULL,
+ DEC_PROP_2 NUMERIC(13,4) NULL,
+ BOOL_PROP_1 BOOL NULL,
+ BOOL_PROP_2 BOOL NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_blob_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ BLOB_DATA BYTEA NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_calendars
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ CALENDAR_NAME VARCHAR(200) NOT NULL,
+ CALENDAR BYTEA NOT NULL,
+ PRIMARY KEY (SCHED_NAME,CALENDAR_NAME)
+);
+
+
+CREATE TABLE qrtz_paused_trigger_grps
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_fired_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ ENTRY_ID VARCHAR(95) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ INSTANCE_NAME VARCHAR(200) NOT NULL,
+ FIRED_TIME BIGINT NOT NULL,
+ SCHED_TIME BIGINT NOT NULL,
+ PRIORITY INTEGER NOT NULL,
+ STATE VARCHAR(16) NOT NULL,
+ JOB_NAME VARCHAR(200) NULL,
+ JOB_GROUP VARCHAR(200) NULL,
+ IS_NONCONCURRENT BOOL NULL,
+ REQUESTS_RECOVERY BOOL NULL,
+ PRIMARY KEY (SCHED_NAME,ENTRY_ID)
+);
+
+CREATE TABLE qrtz_scheduler_state
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ INSTANCE_NAME VARCHAR(200) NOT NULL,
+ LAST_CHECKIN_TIME BIGINT NOT NULL,
+ CHECKIN_INTERVAL BIGINT NOT NULL,
+ PRIMARY KEY (SCHED_NAME,INSTANCE_NAME)
+);
+
+CREATE TABLE qrtz_locks
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ LOCK_NAME VARCHAR(40) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,LOCK_NAME)
+);
+
+create index idx_qrtz_j_req_recovery on qrtz_job_details(SCHED_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_j_grp on qrtz_job_details(SCHED_NAME,JOB_GROUP);
+
+create index idx_qrtz_t_j on qrtz_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_t_jg on qrtz_triggers(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_t_c on qrtz_triggers(SCHED_NAME,CALENDAR_NAME);
+create index idx_qrtz_t_g on qrtz_triggers(SCHED_NAME,TRIGGER_GROUP);
+create index idx_qrtz_t_state on qrtz_triggers(SCHED_NAME,TRIGGER_STATE);
+create index idx_qrtz_t_n_state on qrtz_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_n_g_state on qrtz_triggers(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_next_fire_time on qrtz_triggers(SCHED_NAME,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st on qrtz_triggers(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_misfire on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st_misfire on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
+create index idx_qrtz_t_nft_st_misfire_grp on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
+
+create index idx_qrtz_ft_trig_inst_name on qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME);
+create index idx_qrtz_ft_inst_job_req_rcvry on qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_ft_j_g on qrtz_fired_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_ft_jg on qrtz_fired_triggers(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_ft_t_g on qrtz_fired_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
+create index idx_qrtz_ft_tg on qrtz_fired_triggers(SCHED_NAME,TRIGGER_GROUP);
+
+commit;
+
http://git-wip-us.apache.org/repos/asf/ambari/blob/630983ab/ambari-server/src/main/resources/Ambari-DDL-Postgres-REMOTE-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-REMOTE-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-REMOTE-CREATE.sql
index 5742566..e82cb3d 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-REMOTE-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-REMOTE-CREATE.sql
@@ -45,6 +45,8 @@ CREATE TABLE ambari.ambari_sequences (sequence_name VARCHAR(255) PRIMARY KEY, "v
CREATE TABLE ambari.configgroup (group_id BIGINT, cluster_id BIGINT NOT NULL, group_name VARCHAR(255) NOT NULL, tag VARCHAR(1024) NOT NULL, description VARCHAR(1024), create_timestamp BIGINT NOT NULL, PRIMARY KEY(group_id));
CREATE TABLE ambari.confgroupclusterconfigmapping (config_group_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, config_type VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, user_name VARCHAR(255) DEFAULT '_db', create_timestamp BIGINT NOT NULL, PRIMARY KEY(config_group_id, cluster_id, config_type));
CREATE TABLE ambari.configgrouphostmapping (config_group_id BIGINT NOT NULL, host_name VARCHAR(255) NOT NULL, PRIMARY KEY(config_group_id, host_name));
+CREATE TABLE ambari.requestschedule (schedule_id bigint, cluster_id BIGINT NOT NULL, request_context varchar(255), status varchar(255), target_type varchar(255), target_name varchar(255) NOT NULL, target_service varchar(255) NOT NULL, target_component varchar(255), batch_requests_by_host boolean, batch_host_count smallint, batch_separation_minutes smallint, batch_toleration_limit smallint, create_user varchar(255), create_timestamp bigint, update_user varchar(255), update_timestamp bigint, minutes varchar(10), hours varchar(10), days_of_month varchar(10), month varchar(10), day_of_week varchar(10), yearToSchedule varchar(10), startTime bigint, endTime bigint, PRIMARY KEY(schedule_id));
+CREATE TABLE ambari.requestschedulebatchhost (schedule_id bigint, batch_id bigint, host_name varchar(255), batch_name varchar(255), PRIMARY KEY(schedule_id, batch_id, host_name));
ALTER TABLE ambari.clusterconfig ADD CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
ALTER TABLE ambari.clusterservices ADD CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
@@ -73,6 +75,8 @@ ALTER TABLE ambari.confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclus
ALTER TABLE ambari.confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclusterconfigmapping_group_id FOREIGN KEY (config_group_id) REFERENCES ambari.configgroup (group_id);
ALTER TABLE ambari.configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_configgroup_id FOREIGN KEY (config_group_id) REFERENCES ambari.configgroup (group_id);
ALTER TABLE ambari.configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
+ALTER TABLE ambari.requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
+ALTER TABLE ambari.requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_schedule FOREIGN KEY (schedule_id) REFERENCES ambari.requestschedule (schedule_id);
BEGIN;
@@ -174,3 +178,187 @@ CREATE TABLE clusterEvent (
error TEXT, data TEXT ,
host TEXT, rack TEXT
);
+
+-- Quartz tables
+
+drop table qrtz_fired_triggers;
+DROP TABLE QRTZ_PAUSED_TRIGGER_GRPS;
+DROP TABLE QRTZ_SCHEDULER_STATE;
+DROP TABLE QRTZ_LOCKS;
+drop table qrtz_simple_triggers;
+drop table qrtz_cron_triggers;
+drop table qrtz_simprop_triggers;
+DROP TABLE QRTZ_BLOB_TRIGGERS;
+drop table qrtz_triggers;
+drop table qrtz_job_details;
+drop table qrtz_calendars;
+
+CREATE TABLE qrtz_job_details
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ JOB_NAME VARCHAR(200) NOT NULL,
+ JOB_GROUP VARCHAR(200) NOT NULL,
+ DESCRIPTION VARCHAR(250) NULL,
+ JOB_CLASS_NAME VARCHAR(250) NOT NULL,
+ IS_DURABLE BOOL NOT NULL,
+ IS_NONCONCURRENT BOOL NOT NULL,
+ IS_UPDATE_DATA BOOL NOT NULL,
+ REQUESTS_RECOVERY BOOL NOT NULL,
+ JOB_DATA BYTEA NULL,
+ PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
+);
+
+CREATE TABLE qrtz_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ JOB_NAME VARCHAR(200) NOT NULL,
+ JOB_GROUP VARCHAR(200) NOT NULL,
+ DESCRIPTION VARCHAR(250) NULL,
+ NEXT_FIRE_TIME BIGINT NULL,
+ PREV_FIRE_TIME BIGINT NULL,
+ PRIORITY INTEGER NULL,
+ TRIGGER_STATE VARCHAR(16) NOT NULL,
+ TRIGGER_TYPE VARCHAR(8) NOT NULL,
+ START_TIME BIGINT NOT NULL,
+ END_TIME BIGINT NULL,
+ CALENDAR_NAME VARCHAR(200) NULL,
+ MISFIRE_INSTR SMALLINT NULL,
+ JOB_DATA BYTEA NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
+ REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP)
+);
+
+CREATE TABLE qrtz_simple_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ REPEAT_COUNT BIGINT NOT NULL,
+ REPEAT_INTERVAL BIGINT NOT NULL,
+ TIMES_TRIGGERED BIGINT NOT NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_cron_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ CRON_EXPRESSION VARCHAR(120) NOT NULL,
+ TIME_ZONE_ID VARCHAR(80),
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_simprop_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ STR_PROP_1 VARCHAR(512) NULL,
+ STR_PROP_2 VARCHAR(512) NULL,
+ STR_PROP_3 VARCHAR(512) NULL,
+ INT_PROP_1 INT NULL,
+ INT_PROP_2 INT NULL,
+ LONG_PROP_1 BIGINT NULL,
+ LONG_PROP_2 BIGINT NULL,
+ DEC_PROP_1 NUMERIC(13,4) NULL,
+ DEC_PROP_2 NUMERIC(13,4) NULL,
+ BOOL_PROP_1 BOOL NULL,
+ BOOL_PROP_2 BOOL NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_blob_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ BLOB_DATA BYTEA NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_calendars
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ CALENDAR_NAME VARCHAR(200) NOT NULL,
+ CALENDAR BYTEA NOT NULL,
+ PRIMARY KEY (SCHED_NAME,CALENDAR_NAME)
+);
+
+
+CREATE TABLE qrtz_paused_trigger_grps
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE qrtz_fired_triggers
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ ENTRY_ID VARCHAR(95) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ INSTANCE_NAME VARCHAR(200) NOT NULL,
+ FIRED_TIME BIGINT NOT NULL,
+ SCHED_TIME BIGINT NOT NULL,
+ PRIORITY INTEGER NOT NULL,
+ STATE VARCHAR(16) NOT NULL,
+ JOB_NAME VARCHAR(200) NULL,
+ JOB_GROUP VARCHAR(200) NULL,
+ IS_NONCONCURRENT BOOL NULL,
+ REQUESTS_RECOVERY BOOL NULL,
+ PRIMARY KEY (SCHED_NAME,ENTRY_ID)
+);
+
+CREATE TABLE qrtz_scheduler_state
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ INSTANCE_NAME VARCHAR(200) NOT NULL,
+ LAST_CHECKIN_TIME BIGINT NOT NULL,
+ CHECKIN_INTERVAL BIGINT NOT NULL,
+ PRIMARY KEY (SCHED_NAME,INSTANCE_NAME)
+);
+
+CREATE TABLE qrtz_locks
+ (
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ LOCK_NAME VARCHAR(40) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,LOCK_NAME)
+);
+
+create index idx_qrtz_j_req_recovery on qrtz_job_details(SCHED_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_j_grp on qrtz_job_details(SCHED_NAME,JOB_GROUP);
+
+create index idx_qrtz_t_j on qrtz_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_t_jg on qrtz_triggers(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_t_c on qrtz_triggers(SCHED_NAME,CALENDAR_NAME);
+create index idx_qrtz_t_g on qrtz_triggers(SCHED_NAME,TRIGGER_GROUP);
+create index idx_qrtz_t_state on qrtz_triggers(SCHED_NAME,TRIGGER_STATE);
+create index idx_qrtz_t_n_state on qrtz_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_n_g_state on qrtz_triggers(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_next_fire_time on qrtz_triggers(SCHED_NAME,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st on qrtz_triggers(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_misfire on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st_misfire on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
+create index idx_qrtz_t_nft_st_misfire_grp on qrtz_triggers(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
+
+create index idx_qrtz_ft_trig_inst_name on qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME);
+create index idx_qrtz_ft_inst_job_req_rcvry on qrtz_fired_triggers(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_ft_j_g on qrtz_fired_triggers(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_ft_jg on qrtz_fired_triggers(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_ft_t_g on qrtz_fired_triggers(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
+create index idx_qrtz_ft_tg on qrtz_fired_triggers(SCHED_NAME,TRIGGER_GROUP);
+
+commit;
http://git-wip-us.apache.org/repos/asf/ambari/blob/630983ab/ambari-server/src/main/resources/META-INF/persistence.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/META-INF/persistence.xml b/ambari-server/src/main/resources/META-INF/persistence.xml
index 34263a9..d56378b 100644
--- a/ambari-server/src/main/resources/META-INF/persistence.xml
+++ b/ambari-server/src/main/resources/META-INF/persistence.xml
@@ -38,6 +38,8 @@
<class>org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity</class>
<class>org.apache.ambari.server.orm.entities.ConfigGroupHostMappingEntity</class>
<class>org.apache.ambari.server.orm.entities.ActionEntity</class>
+ <class>org.apache.ambari.server.orm.entities.RequestScheduleEntity</class>
+ <class>org.apache.ambari.server.orm.entities.RequestScheduleBatchHostEntity</class>
<properties>
<!--<property name="javax.persistence.jdbc.url" value="jdbc:postgresql://localhost/ambari" />-->
http://git-wip-us.apache.org/repos/asf/ambari/blob/630983ab/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RequestScheduleDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RequestScheduleDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RequestScheduleDAOTest.java
new file mode 100644
index 0000000..1c6bdb7
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RequestScheduleDAOTest.java
@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm.dao;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
+import junit.framework.Assert;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.orm.entities.RequestScheduleBatchHostEntity;
+import org.apache.ambari.server.orm.entities.RequestScheduleEntity;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+
+public class RequestScheduleDAOTest {
+ private Injector injector;
+ private HostDAO hostDAO;
+ private ClusterDAO clusterDAO;
+ private RequestScheduleDAO requestScheduleDAO;
+ private RequestScheduleBatchHostDAO batchHostDAO;
+
+ @Before
+ public void setup() throws Exception {
+ injector = Guice.createInjector(new InMemoryDefaultTestModule());
+ injector.getInstance(GuiceJpaInitializer.class);
+
+ clusterDAO = injector.getInstance(ClusterDAO.class);
+ hostDAO = injector.getInstance(HostDAO.class);
+ clusterDAO = injector.getInstance(ClusterDAO.class);
+ requestScheduleDAO = injector.getInstance(RequestScheduleDAO.class);
+ batchHostDAO = injector.getInstance(RequestScheduleBatchHostDAO.class);
+ }
+
+ @After
+ public void teardown() throws AmbariException {
+ injector.getInstance(PersistService.class).stop();
+ }
+
+ private RequestScheduleEntity createScheduleEntity() {
+ RequestScheduleEntity scheduleEntity = new RequestScheduleEntity();
+
+ ClusterEntity clusterEntity = new ClusterEntity();
+ clusterEntity.setClusterName("c1");
+ clusterDAO.create(clusterEntity);
+
+ scheduleEntity.setClusterEntity(clusterEntity);
+ scheduleEntity.setClusterId(clusterEntity.getClusterId());
+ scheduleEntity.setRequestContext("Test");
+ scheduleEntity.setStatus("SCHEDULED");
+ scheduleEntity.setTargetType("ACTION");
+ scheduleEntity.setTargetName("REBALANCE");
+ scheduleEntity.setTargetService("HDFS");
+ scheduleEntity.setTargetComponent("DATANODE");
+ scheduleEntity.setBatchRequestByHost(false);
+ scheduleEntity.setMinutes("30");
+ scheduleEntity.setHours("12");
+ scheduleEntity.setDayOfWeek("*");
+ scheduleEntity.setDaysOfMonth("*");
+ scheduleEntity.setYear("*");
+
+ requestScheduleDAO.create(scheduleEntity);
+
+ HostEntity hostEntity = new HostEntity();
+ hostEntity.setHostName("h1");
+ hostEntity.setOsType("centOS");
+ hostDAO.create(hostEntity);
+
+ RequestScheduleBatchHostEntity batchHostEntity = new
+ RequestScheduleBatchHostEntity();
+
+ batchHostEntity.setBatchId(1L);
+ batchHostEntity.setScheduleId(scheduleEntity.getScheduleId());
+ batchHostEntity.setRequestScheduleEntity(scheduleEntity);
+ batchHostEntity.setHostName(hostEntity.getHostName());
+ batchHostEntity.setRequestScheduleEntity(scheduleEntity);
+ batchHostDAO.create(batchHostEntity);
+
+ scheduleEntity.getRequestScheduleBatchHostEntities().add(batchHostEntity);
+ scheduleEntity = requestScheduleDAO.merge(scheduleEntity);
+
+ return scheduleEntity;
+ }
+
+ @Test
+ public void testCreateRequestSchedule() throws Exception {
+ RequestScheduleEntity scheduleEntity = createScheduleEntity();
+
+ Assert.assertTrue(scheduleEntity.getScheduleId() > 0);
+ Assert.assertEquals("SCHEDULED", scheduleEntity.getStatus());
+ Assert.assertEquals("REBALANCE", scheduleEntity.getTargetName());
+ Assert.assertEquals("HDFS", scheduleEntity.getTargetService());
+ Assert.assertEquals(false, scheduleEntity.getIsBatchRequestByHost());
+ Assert.assertEquals("12", scheduleEntity.getHours());
+ Assert.assertEquals("h1", scheduleEntity
+ .getRequestScheduleBatchHostEntities().iterator().next().getHostName());
+ }
+
+ @Test
+ public void testFindById() throws Exception {
+ RequestScheduleEntity scheduleEntity = createScheduleEntity();
+
+ RequestScheduleEntity testScheduleEntity = requestScheduleDAO.findById
+ (scheduleEntity.getScheduleId());
+
+ Assert.assertEquals(scheduleEntity, testScheduleEntity);
+ }
+
+ @Test
+ public void testFindByStatus() throws Exception {
+ RequestScheduleEntity scheduleEntity = createScheduleEntity();
+
+ List<RequestScheduleEntity> scheduleEntities = requestScheduleDAO
+ .findByStatus("SCHEDULED");
+
+ Assert.assertNotNull(scheduleEntities);
+ Assert.assertEquals(scheduleEntity, scheduleEntities.get(0));
+ }
+}