You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by mp...@apache.org on 2015/08/23 22:41:57 UTC
[2/2] ambari git commit: AMBARI-12822. Implement SQL Anywhere as a
supported DB for Ambari. (mpapirkovskyy)
AMBARI-12822. Implement SQL Anywhere as a supported DB for Ambari. (mpapirkovskyy)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/49c7e74b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/49c7e74b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/49c7e74b
Branch: refs/heads/branch-2.1
Commit: 49c7e74b3d0c63d5fcbfcb2b3b05967edb05e9cb
Parents: d3776de
Author: Myroslav Papirkovskyy <mp...@hortonworks.com>
Authored: Wed Aug 19 18:23:41 2015 +0300
Committer: Myroslav Papirkovskyy <mp...@hortonworks.com>
Committed: Sun Aug 23 23:41:11 2015 +0300
----------------------------------------------------------------------
.../server/configuration/Configuration.java | 5 +-
ambari-server/src/main/python/ambari-server.py | 3 +-
.../python/ambari_server/dbConfiguration.py | 53 +-
.../ambari_server/dbConfiguration_linux.py | 157 ++-
.../python/ambari_server/serverConfiguration.py | 6 +
.../main/python/ambari_server/serverSetup.py | 2 +-
.../src/main/python/ambari_server_main.py | 14 +-
.../resources/Ambari-DDL-SQLAnywhere-CREATE.sql | 1219 ++++++++++++++++++
.../resources/Ambari-DDL-SQLAnywhere-DROP.sql | 0
.../src/test/python/TestAmbariServer.py | 10 +-
10 files changed, 1430 insertions(+), 39 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/49c7e74b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 08c835f..808cca4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -465,7 +465,8 @@ public class Configuration {
ORACLE("oracle"),
MYSQL("mysql"),
DERBY("derby"),
- SQL_SERVER("sqlserver");
+ SQL_SERVER("sqlserver"),
+ SQL_ANYWHERE("sqlanywhere");
private static final Map<String, DatabaseType> m_mappedTypes =
new HashMap<String, Configuration.DatabaseType>(5);
@@ -1667,6 +1668,8 @@ public class Configuration {
databaseType = DatabaseType.DERBY;
} else if (dbUrl.contains(DatabaseType.SQL_SERVER.getName())) {
databaseType = DatabaseType.SQL_SERVER;
+ } else if (dbUrl.contains(DatabaseType.SQL_ANYWHERE.getName())) {
+ databaseType = DatabaseType.SQL_ANYWHERE;
} else {
throw new RuntimeException(
"The database type could be not determined from the JDBC URL "
http://git-wip-us.apache.org/repos/asf/ambari/blob/49c7e74b/ambari-server/src/main/python/ambari-server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index 524db30..db6f7cc 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -348,7 +348,7 @@ def init_parser_options(parser):
dest="ldap_sync_users")
parser.add_option('--groups', default=None, help="LDAP sync groups option. Specifies the path to a CSV file of group names to be synchronized.",
dest="ldap_sync_groups")
- parser.add_option('--database', default=None, help="Database to use embedded|oracle|mysql|mssql|postgres", dest="dbms")
+ parser.add_option('--database', default=None, help="Database to use embedded|oracle|mysql|mssql|postgres|sqlanywhere", dest="dbms")
parser.add_option('--databasehost', default=None, help="Hostname of database server", dest="database_host")
parser.add_option('--databaseport', default=None, help="Database port", dest="database_port")
parser.add_option('--databasename', default=None, help="Database/Service name or ServiceID",
@@ -359,6 +359,7 @@ def init_parser_options(parser):
parser.add_option('--databasepassword', default=None, help="Database user password", dest="database_password")
parser.add_option('--sidorsname', default="sname", help="Oracle database identifier type, Service ID/Service "
"Name sid|sname", dest="sid_or_sname")
+ parser.add_option('--sqla-server-name', default=None, help="SQL Anywhere server name", dest="sqla_server_name")
parser.add_option('--jdbc-driver', default=None, help="Specifies the path to the JDBC driver JAR file or archive " \
"with all required files(jdbc jar, libraries and etc), for the " \
"database type specified with the --jdbc-db option. " \
http://git-wip-us.apache.org/repos/asf/ambari/blob/49c7e74b/ambari-server/src/main/python/ambari_server/dbConfiguration.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/dbConfiguration.py b/ambari-server/src/main/python/ambari_server/dbConfiguration.py
index a66738b..7a11cdc 100644
--- a/ambari-server/src/main/python/ambari_server/dbConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/dbConfiguration.py
@@ -39,8 +39,9 @@ SETUP_DB_CONNECT_ATTEMPTS = 3
USERNAME_PATTERN = "^[a-zA-Z_][a-zA-Z0-9_\-]*$"
PASSWORD_PATTERN = "^[a-zA-Z0-9_-]*$"
-DATABASE_NAMES = ["postgres", "oracle", "mysql", "mssql"]
-DATABASE_FULL_NAMES = {"oracle": "Oracle", "mysql": "MySQL", "mssql": "Microsoft SQL Server", "postgres": "PostgreSQL"}
+DATABASE_NAMES = ["postgres", "oracle", "mysql", "mssql", "sqlanywhere"]
+DATABASE_FULL_NAMES = {"oracle": "Oracle", "mysql": "MySQL", "mssql": "Microsoft SQL Server", "postgres":
+ "PostgreSQL", "sqlanywhere": "SQL Anywhere"}
AMBARI_DATABASE_NAME = "ambari"
AMBARI_DATABASE_TITLE = "ambari"
@@ -315,14 +316,16 @@ class DBMSConfigFactoryWindows(DBMSConfigFactory):
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class DBMSConfigFactoryLinux(DBMSConfigFactory):
def __init__(self):
- from ambari_server.dbConfiguration_linux import createPGConfig, createOracleConfig, createMySQLConfig, createMSSQLConfig
+ from ambari_server.dbConfiguration_linux import createPGConfig, createOracleConfig, createMySQLConfig, \
+ createMSSQLConfig, createSQLAConfig
self.DBMS_KEYS_LIST = [
'embedded',
'oracle',
'mysql',
'postgres',
- 'mssql'
+ 'mssql',
+ 'sqlanywhere'
]
self.DRIVER_KEYS_LIST = [
@@ -339,23 +342,26 @@ class DBMSConfigFactoryLinux(DBMSConfigFactory):
DBMSDesc(self.DBMS_KEYS_LIST[1], STORAGE_TYPE_REMOTE, 'Oracle', '', createOracleConfig),
DBMSDesc(self.DBMS_KEYS_LIST[2], STORAGE_TYPE_REMOTE, 'MySQL', '', createMySQLConfig),
DBMSDesc(self.DBMS_KEYS_LIST[3], STORAGE_TYPE_REMOTE, 'PostgreSQL', '', createPGConfig),
- DBMSDesc(self.DBMS_KEYS_LIST[4], STORAGE_TYPE_REMOTE, 'Microsoft SQL Server', 'Tech Preview', createMSSQLConfig)
+ DBMSDesc(self.DBMS_KEYS_LIST[4], STORAGE_TYPE_REMOTE, 'Microsoft SQL Server', 'Tech Preview', createMSSQLConfig),
+ DBMSDesc(self.DBMS_KEYS_LIST[5], STORAGE_TYPE_REMOTE, 'SQL Anywhere', '', createSQLAConfig)
]
self.DBMS_DICT = \
{
- '-' : 0,
- '-' + STORAGE_TYPE_LOCAL : 0,
- self.DBMS_KEYS_LIST[0] + '-' : 0,
- self.DBMS_KEYS_LIST[2] + '-' : 2,
- self.DBMS_KEYS_LIST[2] + '-' + STORAGE_TYPE_REMOTE : 2,
- self.DBMS_KEYS_LIST[4] + '-' : 4,
- self.DBMS_KEYS_LIST[4] + '-' + STORAGE_TYPE_REMOTE : 4,
- self.DBMS_KEYS_LIST[1] + '-' : 1,
- self.DBMS_KEYS_LIST[1] + '-' + STORAGE_TYPE_REMOTE : 1,
- self.DBMS_KEYS_LIST[3] + '-' : 3,
- self.DBMS_KEYS_LIST[3] + '-' + STORAGE_TYPE_LOCAL : 0,
- self.DBMS_KEYS_LIST[3] + '-' + STORAGE_TYPE_REMOTE : 3,
+ '-': 0,
+ '-' + STORAGE_TYPE_LOCAL: 0,
+ self.DBMS_KEYS_LIST[0] + '-': 0,
+ self.DBMS_KEYS_LIST[2] + '-': 2,
+ self.DBMS_KEYS_LIST[2] + '-' + STORAGE_TYPE_REMOTE: 2,
+ self.DBMS_KEYS_LIST[4] + '-': 4,
+ self.DBMS_KEYS_LIST[4] + '-' + STORAGE_TYPE_REMOTE: 4,
+ self.DBMS_KEYS_LIST[1] + '-': 1,
+ self.DBMS_KEYS_LIST[1] + '-' + STORAGE_TYPE_REMOTE: 1,
+ self.DBMS_KEYS_LIST[3] + '-': 3,
+ self.DBMS_KEYS_LIST[3] + '-' + STORAGE_TYPE_LOCAL: 0,
+ self.DBMS_KEYS_LIST[3] + '-' + STORAGE_TYPE_REMOTE: 3,
+ self.DBMS_KEYS_LIST[5] + '-': 5,
+ self.DBMS_KEYS_LIST[5] + '-' + STORAGE_TYPE_REMOTE: 5,
}
self.DBMS_PROMPT_PATTERN = "[{0}] - {1}{2}\n"
@@ -447,7 +453,7 @@ class DBMSConfigFactoryLinux(DBMSConfigFactory):
except KeyError:
# Unsupported database type (e.g. local Oracle, MySQL or MSSQL)
raise FatalException(15, "Invalid database selection: {0} {1}".format(
- getattr(options, "persistence_type", ""), getattr(options, "options.dbms", "")))
+ getattr(options, "persistence_type", ""), getattr(options, "dbms", "")))
return def_index
@@ -511,3 +517,14 @@ def ensure_jdbc_driver_is_installed(options, properties):
result = dbms._is_jdbc_driver_installed(properties)
if result == -1:
raise FatalException(-1, dbms.JDBC_DRIVER_INSTALL_MSG)
+ dbms._extract_client_tarball(properties)
+
+def get_native_libs_path(options, properties):
+ factory = DBMSConfigFactory()
+ dbms = factory.create(options, properties)
+ return dbms._get_native_libs(properties)
+
+def get_jdbc_driver_path(options, properties):
+ factory = DBMSConfigFactory()
+ dbms = factory.create(options, properties)
+ return dbms._get_default_driver_path(properties)
http://git-wip-us.apache.org/repos/asf/ambari/blob/49c7e74b/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py b/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
index 25958eb..53dba23 100644
--- a/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
+++ b/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
@@ -42,7 +42,7 @@ from ambari_server.serverConfiguration import encrypt_password, store_password_f
JDBC_DRIVER_PROPERTY, JDBC_URL_PROPERTY, \
JDBC_RCA_USER_NAME_PROPERTY, JDBC_RCA_PASSWORD_ALIAS, JDBC_RCA_PASSWORD_FILE_PROPERTY, \
JDBC_RCA_DRIVER_PROPERTY, JDBC_RCA_URL_PROPERTY, \
- PERSISTENCE_TYPE_PROPERTY
+ PERSISTENCE_TYPE_PROPERTY, JDBC_SQLA_SERVER_NAME
from ambari_server.userInput import get_YN_input, get_validated_string_input, read_password
from ambari_server.utils import get_postgre_hba_dir, get_postgre_running_status
@@ -151,6 +151,9 @@ class LinuxDBMSConfig(DBMSConfig):
'against the database to create the schema: ' + os.linesep +
client_usage_cmd_init + os.linesep)
+ def _get_default_driver_path(self, properties):
+ return os.path.join(configDefaults.JAVA_SHARE_PATH, self.driver_file_name)
+
def _install_jdbc_driver(self, properties, files_list):
if type(files_list) is not int:
print 'Copying JDBC drivers to server resources...'
@@ -159,7 +162,7 @@ class LinuxDBMSConfig(DBMSConfig):
db_name = self.dbms_full_name.lower()
symlink_name = db_name + "-jdbc-driver.jar"
jdbc_symlink = os.path.join(resources_dir, symlink_name)
- db_default_driver_path = os.path.join(configDefaults.JAVA_SHARE_PATH, self.driver_file_name)
+ db_default_driver_path = self._get_default_driver_path(properties)
if os.path.lexists(jdbc_symlink):
os.remove(jdbc_symlink)
@@ -228,6 +231,12 @@ class LinuxDBMSConfig(DBMSConfig):
return drivers
return -1
+ def _extract_client_tarball(self, properties):
+ pass
+
+ def _get_native_libs(self, properties):
+ return None
+
# Let the console user initialize the remote database schema
def _setup_remote_db(self):
setup_msg = "Before starting Ambari Server, you must run the following DDL " \
@@ -248,6 +257,22 @@ class LinuxDBMSConfig(DBMSConfig):
if encrypted_password != self.database_password:
properties.process_pair(property_name, encrypted_password)
+ def _get_database_hostname(self):
+ # fully qualify the hostname to make sure all the other hosts can connect
+ # to the jdbc hostname since its passed onto the agents for RCA
+ jdbc_hostname = self.database_host
+ if (self.database_host == "localhost"):
+ jdbc_hostname = socket.getfqdn()
+ return jdbc_hostname
+
+ def _get_jdbc_connection_string(self):
+ jdbc_hostname = self._get_database_hostname()
+
+ connectionStringFormat = self.database_url_pattern
+ if self.sid_or_sname == "sid":
+ connectionStringFormat = self.database_url_pattern_alt
+ return connectionStringFormat.format(jdbc_hostname, self.database_port, self.database_name)
+
# Store set of properties for remote database connection
def _store_remote_properties(self, properties):
properties.process_pair(PERSISTENCE_TYPE_PROPERTY, self.persistence_type)
@@ -258,16 +283,9 @@ class LinuxDBMSConfig(DBMSConfig):
properties.process_pair(JDBC_DATABASE_NAME_PROPERTY, self.database_name)
properties.process_pair(JDBC_DRIVER_PROPERTY, self.driver_class_name)
- # fully qualify the hostname to make sure all the other hosts can connect
- # to the jdbc hostname since its passed onto the agents for RCA
- jdbc_hostname = self.database_host
- if (self.database_host == "localhost"):
- jdbc_hostname = socket.getfqdn()
- connectionStringFormat = self.database_url_pattern
- if self.sid_or_sname == "sid":
- connectionStringFormat = self.database_url_pattern_alt
- properties.process_pair(JDBC_URL_PROPERTY, connectionStringFormat.format(jdbc_hostname, self.database_port, self.database_name))
+ connection_string = self._get_jdbc_connection_string()
+ properties.process_pair(JDBC_URL_PROPERTY, connection_string)
properties.process_pair(JDBC_USER_NAME_PROPERTY, self.database_username)
self._store_password_property(properties, JDBC_PASSWORD_PROPERTY)
@@ -277,7 +295,7 @@ class LinuxDBMSConfig(DBMSConfig):
properties.process_pair(JDBC_PROPERTIES_PREFIX + pair[0], pair[1])
properties.process_pair(JDBC_RCA_DRIVER_PROPERTY, self.driver_class_name)
- properties.process_pair(JDBC_RCA_URL_PROPERTY, connectionStringFormat.format(jdbc_hostname, self.database_port, self.database_name))
+ properties.process_pair(JDBC_RCA_URL_PROPERTY, connection_string)
properties.process_pair(JDBC_RCA_USER_NAME_PROPERTY, self.database_username)
self._store_password_property(properties, JDBC_RCA_PASSWORD_FILE_PROPERTY)
@@ -909,6 +927,10 @@ class MSSQLConfig(LinuxDBMSConfig):
def _is_jdbc_driver_installed(self, properties):
return LinuxDBMSConfig._find_jdbc_driver("*sqljdbc*.jar")
+ def _get_jdbc_driver_path(self, properties):
+ super(MSSQLConfig, self)._get_jdbc_driver_path(properties)
+
+
def _configure_database_name(self):
self.database_name = LinuxDBMSConfig._get_validated_db_name(self.database_storage_name, self.database_name)
return True
@@ -918,3 +940,114 @@ class MSSQLConfig(LinuxDBMSConfig):
def createMSSQLConfig(options, properties, storage_type, dbId):
return MSSQLConfig(options, properties, storage_type)
+
+class SQLAConfig(LinuxDBMSConfig):
+ EXTRACT_CMD="tar xzf {0} -C {1}"
+
+ def __init__(self, options, properties, storage_type):
+ super(SQLAConfig, self).__init__(options, properties, storage_type)
+
+ #Init the database configuration data here, if any
+ self.dbms = "sqlanywhere"
+ self.dbms_full_name = "SQL Anywhere"
+ self.driver_class_name = "sap.jdbc4.sqlanywhere.IDriver" #TODO sybase.* for v < 17, check requirements
+ self.driver_file_name = "sajdbc4.jar"
+ self.server_name = DBMSConfig._init_member_with_prop_default(options, "sqla_server_name", properties,
+ JDBC_SQLA_SERVER_NAME, "ambari")
+ self.driver_symlink_name = "sqlanywhere-jdbc-driver.jar"
+ self.client_tarball_pattern = "*sqla-client-jdbc*.tar.gz"
+ self.client_folder = "sqla-client-jdbc"
+
+ self.database_storage_name = "Database"
+ self.database_port = DBMSConfig._init_member_with_prop_default(options, "database_port",
+ properties, JDBC_PORT_PROPERTY, "2638")
+
+ self.database_url_pattern = "jdbc:sqlanywhere:eng={0};dbf={1};host={2};port={3}"
+ self.database_url_pattern_alt = "jdbc:sqlanywhere:eng={0};dbf={1};host={2};port={3}"
+
+ self.JDBC_DRIVER_INSTALL_MSG = 'Before starting Ambari Server, ' \
+ 'you must copy the {0} jdbc client tarball to {1}.'.format(
+ self.dbms_full_name, configDefaults.SHARE_PATH)
+
+ self.init_script_file = "/var/lib/ambari-server/resources/Ambari-DDL-SQLAnywhere-CREATE.sql"
+ self.drop_tables_script_file = "/var/lib/ambari-server/resources/Ambari-DDL-SQLAnywhere-DROP.sql"
+ self.client_tool_usage_pattern = 'stub string'
+
+ #
+ # Private implementation
+ #
+
+ def _get_jdbc_connection_string(self):
+ jdbc_hostname = self._get_database_hostname()
+
+ connectionStringFormat = self.database_url_pattern
+ return connectionStringFormat.format(self.server_name, self.database_name, jdbc_hostname, self.database_port)
+
+ def _reset_remote_database(self):
+ super(SQLAConfig, self)._reset_remote_database()
+
+ raise NonFatalException("Please replace '*' symbols with password before running DDLs!")
+
+ def _is_jdbc_driver_installed(self, properties):
+ drivers = []
+ drivers.extend(glob.glob(configDefaults.SHARE_PATH + os.sep + self.client_tarball_pattern))
+ if drivers:
+ return drivers
+ return -1
+
+ def _install_jdbc_driver(self, properties, files_list):
+ return True
+
+ def _configure_database_name(self):
+ self.server_name = get_validated_string_input("Server name (" + str(self.server_name) + "): ",
+ self.server_name, ".*",
+ "Invalid server name",
+ False)
+ self.database_name = LinuxDBMSConfig._get_validated_db_name(self.database_storage_name, self.database_name)
+ return True
+
+ def _get_remote_script_line(self, scriptFile):
+ return "stub script line" #TODO not used anymore, investigate if it can be removed
+
+ def _store_remote_properties(self, properties):
+ """
+ Override the remote properties written for SQL Anywhere, inheriting those from the parent first.
+ :param properties: the properties object to set SQL Anywhere specific properties on
+ :return:
+ """
+ super(SQLAConfig, self)._store_remote_properties(properties)
+ properties.process_pair(JDBC_SQLA_SERVER_NAME, self.server_name)
+
+ def _extract_client_tarball(self, properties):
+ files = []
+ files.extend(glob.glob(configDefaults.SHARE_PATH + os.sep + self.client_tarball_pattern))
+
+ if len(files) > 1:
+ raise FatalException(-1, "More than one SQL Anywhere client tarball detected")
+ elif len(files) == 0:
+ raise FatalException(-1, self.JDBC_DRIVER_INSTALL_MSG)
+
+ cmd = SQLAConfig.EXTRACT_CMD.format(files[0], get_resources_location(properties))
+
+
+ process = subprocess.Popen(cmd.split(' '),
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+
+ out, err = process.communicate()
+ retcode = process.returncode
+
+ if retcode != 0:
+ raise FatalException(-1, "Error extracting SQL Anywhere client tarball: " + str(err))
+
+ def _get_native_libs(self, properties):
+ return os.path.join(get_resources_location(properties), self.client_folder, "native", "lib64")
+
+ def _get_default_driver_path(self, properties):
+ return os.path.join(get_resources_location(properties), self.client_folder, "java", self.driver_file_name)
+
+
+def createSQLAConfig(options, properties, storage_type, dbId):
+ return SQLAConfig(options, properties, storage_type)
http://git-wip-us.apache.org/repos/asf/ambari/blob/49c7e74b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration.py b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
index d4d3679..5191e5c 100644
--- a/ambari-server/src/main/python/ambari_server/serverConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
@@ -93,6 +93,7 @@ JDBC_DATABASE_NAME_PROPERTY = "server.jdbc.database_name" # E.g., ambari.
JDBC_HOSTNAME_PROPERTY = "server.jdbc.hostname"
JDBC_PORT_PROPERTY = "server.jdbc.port"
JDBC_POSTGRES_SCHEMA_PROPERTY = "server.jdbc.postgres.schema" # Only for postgres, defaults to same value as DB name
+JDBC_SQLA_SERVER_NAME = "server.jdbc.sqla.server_name"
JDBC_USER_NAME_PROPERTY = "server.jdbc.user.name"
JDBC_PASSWORD_PROPERTY = "server.jdbc.user.passwd"
@@ -220,6 +221,7 @@ class ServerDatabases(object):
mysql = ServerDatabaseEntry("mysql", "MySQL", ServerDatabaseType.remote)
mssql = ServerDatabaseEntry("mssql", "MSSQL", ServerDatabaseType.remote)
derby = ServerDatabaseEntry("derby", "Derby", ServerDatabaseType.remote)
+ sqlanywhere = ServerDatabaseEntry("sqlanywhere", "SQL Anywhere", ServerDatabaseType.remote)
postgres_internal = ServerDatabaseEntry("postgres", "Embedded Postgres", ServerDatabaseType.internal, aliases=['embedded'])
@staticmethod
@@ -250,6 +252,7 @@ class ServerDatabases(object):
class ServerConfigDefaults(object):
def __init__(self):
self.JAVA_SHARE_PATH = "/usr/share/java"
+ self.SHARE_PATH = "/usr/share"
self.OUT_DIR = os.sep + os.path.join("var", "log", "ambari-server")
self.SERVER_OUT_FILE = os.path.join(self.OUT_DIR, "ambari-server.out")
self.SERVER_LOG_FILE = os.path.join(self.OUT_DIR, "ambari-server.log")
@@ -609,6 +612,8 @@ def get_db_type(properties):
db_type = ServerDatabases.mssql
elif str(ServerDatabases.derby) in jdbc_url:
db_type = ServerDatabases.derby
+ elif str(ServerDatabases.sqlanywhere) in jdbc_url:
+ db_type = ServerDatabases.sqlanywhere
if persistence_type == "local" and db_type is None:
db_type = ServerDatabases.postgres_internal
@@ -1181,6 +1186,7 @@ def get_share_jars():
file_list.extend(glob.glob(configDefaults.JAVA_SHARE_PATH + os.sep + "*mysql*"))
file_list.extend(glob.glob(configDefaults.JAVA_SHARE_PATH + os.sep + "*sqljdbc*"))
file_list.extend(glob.glob(configDefaults.JAVA_SHARE_PATH + os.sep + "*ojdbc*"))
+ file_list.extend(glob.glob(configDefaults.JAVA_SHARE_PATH + os.sep + "*sajdbc4*"))
if len(file_list) > 0:
share_jars = string.join(file_list, os.pathsep)
return share_jars
http://git-wip-us.apache.org/repos/asf/ambari/blob/49c7e74b/ambari-server/src/main/python/ambari_server/serverSetup.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/serverSetup.py b/ambari-server/src/main/python/ambari_server/serverSetup.py
index 93b393a..67f67a7 100644
--- a/ambari-server/src/main/python/ambari_server/serverSetup.py
+++ b/ambari-server/src/main/python/ambari_server/serverSetup.py
@@ -950,7 +950,7 @@ def _reset_database(options):
if persistence_type == "remote":
err = 'Ambari doesn\'t support resetting exernal DB automatically. ' \
'To reset Ambari Server schema you must first drop and then create it ' \
- 'using DDL scripts from "\/var/lib/ambari-server/resources/\"'
+ 'using DDL scripts from "/var/lib/ambari-server/resources/"'
raise FatalException(1, err)
else:
factory = DBMSConfigFactory()
http://git-wip-us.apache.org/repos/asf/ambari/blob/49c7e74b/ambari-server/src/main/python/ambari_server_main.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server_main.py b/ambari-server/src/main/python/ambari_server_main.py
index bc561a1..cc37ce9 100644
--- a/ambari-server/src/main/python/ambari_server_main.py
+++ b/ambari-server/src/main/python/ambari_server_main.py
@@ -27,7 +27,8 @@ from ambari_commons.logging_utils import get_debug_mode, print_warning_msg, prin
from ambari_commons.os_check import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons.os_utils import is_root
-from ambari_server.dbConfiguration import ensure_dbms_is_running, ensure_jdbc_driver_is_installed
+from ambari_server.dbConfiguration import ensure_dbms_is_running, ensure_jdbc_driver_is_installed, \
+ get_native_libs_path, get_jdbc_driver_path
from ambari_server.serverConfiguration import configDefaults, find_jdk, get_ambari_classpath, get_ambari_properties, \
get_conf_dir, get_is_persisted, get_is_secure, get_java_exe_path, get_original_master_key, read_ambari_user, \
PID_NAME, SECURITY_KEY_ENV_VAR_NAME, SECURITY_MASTER_KEY_LOCATION, \
@@ -94,6 +95,7 @@ SERVER_PING_TIMEOUT_WINDOWS = 5
SERVER_PING_ATTEMPTS_WINDOWS = 4
SERVER_CLASSPATH_KEY = "SERVER_CLASSPATH"
+LIBRARY_PATH_KEY = "LD_LIBRARY_PATH"
SERVER_SEARCH_PATTERN = "org.apache.ambari.server.controller.AmbariServer"
@@ -256,9 +258,19 @@ def server_process_main(options, scmStatus=None):
class_path = get_conf_dir()
class_path = os.path.abspath(class_path) + os.pathsep + get_ambari_classpath()
+ jdbc_driver_path = get_jdbc_driver_path(options, properties)
+ if jdbc_driver_path not in class_path:
+ class_path = class_path + os.pathsep + jdbc_driver_path
+
if SERVER_CLASSPATH_KEY in os.environ:
class_path = os.environ[SERVER_CLASSPATH_KEY] + os.pathsep + class_path
+ native_libs_path = get_native_libs_path(options, properties)
+ if native_libs_path is not None:
+ if LIBRARY_PATH_KEY in os.environ:
+ native_libs_path = os.environ[LIBRARY_PATH_KEY] + os.pathsep + native_libs_path
+ os.environ[LIBRARY_PATH_KEY] = native_libs_path
+
debug_mode = get_debug_mode()
debug_start = (debug_mode & 1) or SERVER_START_DEBUG
suspend_start = (debug_mode & 2) or SUSPEND_START_MODE
http://git-wip-us.apache.org/repos/asf/ambari/blob/49c7e74b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
new file mode 100644
index 0000000..9f21160
--- /dev/null
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
@@ -0,0 +1,1219 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+CREATE TABLE stack(
+ stack_id NUMERIC(19) NOT NULL,
+ stack_name VARCHAR(255) NOT NULL,
+ stack_version VARCHAR(255) NOT NULL,
+ PRIMARY KEY (stack_id)
+);
+
+CREATE TABLE clusters (
+ cluster_id NUMERIC(19) NOT NULL,
+ resource_id NUMERIC(19) NOT NULL,
+ cluster_info VARCHAR(255) NOT NULL,
+ cluster_name VARCHAR(100) NOT NULL UNIQUE,
+ provisioning_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
+ security_type VARCHAR(32) NOT NULL DEFAULT 'NONE',
+ desired_cluster_state VARCHAR(255) NOT NULL,
+ desired_stack_id NUMERIC(19) NOT NULL,
+ PRIMARY KEY (cluster_id)
+);
+
+CREATE TABLE clusterconfig (
+ config_id NUMERIC(19) NOT NULL,
+ version_tag VARCHAR(255) NOT NULL,
+ version NUMERIC(19) NOT NULL,
+ type_name VARCHAR(255) NOT NULL,
+ cluster_id NUMERIC(19) NOT NULL,
+ stack_id NUMERIC(19) NOT NULL,
+ config_data TEXT NOT NULL,
+ config_attributes TEXT,
+ create_timestamp NUMERIC(19) NOT NULL,
+ PRIMARY KEY (config_id)
+);
+
+CREATE TABLE serviceconfig (
+ service_config_id NUMERIC(19) NOT NULL,
+ cluster_id NUMERIC(19) NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ version NUMERIC(19) NOT NULL,
+ create_timestamp NUMERIC(19) NOT NULL,
+ stack_id NUMERIC(19) NOT NULL,
+ user_name VARCHAR(255) NOT NULL DEFAULT '_db',
+ group_id NUMERIC(19),
+ note TEXT,
+ PRIMARY KEY (service_config_id)
+);
+
+CREATE TABLE serviceconfighosts (
+ service_config_id NUMERIC(19) NOT NULL,
+ host_id NUMERIC(19) NOT NULL,
+ PRIMARY KEY(service_config_id, host_id));
+
+CREATE TABLE serviceconfigmapping (
+ service_config_id NUMERIC(19) NOT NULL,
+ config_id NUMERIC(19) NOT NULL,
+ PRIMARY KEY(service_config_id, config_id));
+
+CREATE TABLE clusterservices (
+ service_name VARCHAR(255) NOT NULL,
+ cluster_id NUMERIC(19) NOT NULL,
+ service_enabled INTEGER NOT NULL,
+ PRIMARY KEY (service_name, cluster_id));
+
+CREATE TABLE clusterstate (
+ cluster_id NUMERIC(19) NOT NULL,
+ current_cluster_state VARCHAR(255) NOT NULL,
+ current_stack_id NUMERIC(19) NOT NULL,
+ PRIMARY KEY (cluster_id)
+);
+
+CREATE TABLE cluster_version (
+ id NUMERIC(19) NOT NULL,
+ repo_version_id NUMERIC(19) NOT NULL,
+ cluster_id NUMERIC(19) NOT NULL,
+ state VARCHAR(32) NOT NULL,
+ start_time NUMERIC(19) NOT NULL,
+ end_time NUMERIC(19),
+ user_name VARCHAR(32),
+ PRIMARY KEY (id));
+
+CREATE TABLE hostcomponentdesiredstate (
+ cluster_id NUMERIC(19) NOT NULL,
+ component_name VARCHAR(255) NOT NULL,
+ desired_stack_id NUMERIC(19) NOT NULL,
+ desired_state VARCHAR(255) NOT NULL,
+ host_id NUMERIC(19) NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ admin_state VARCHAR(32),
+ maintenance_state VARCHAR(32) NOT NULL DEFAULT 'ACTIVE',
+ security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
+ restart_required BIT NOT NULL DEFAULT 0,
+ PRIMARY KEY (cluster_id, component_name, host_id, service_name)
+);
+
+CREATE TABLE hostcomponentstate (
+ cluster_id NUMERIC(19) NOT NULL,
+ component_name VARCHAR(255) NOT NULL,
+ version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN',
+ current_stack_id NUMERIC(19) NOT NULL,
+ current_state VARCHAR(255) NOT NULL,
+ host_id NUMERIC(19) NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE',
+ security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
+ PRIMARY KEY (cluster_id, component_name, host_id, service_name)
+);
+
+CREATE TABLE hosts (
+ host_id NUMERIC(19) NOT NULL,
+ host_name VARCHAR(255) NOT NULL,
+ cpu_count INTEGER NOT NULL,
+ cpu_info VARCHAR(255) NOT NULL,
+ discovery_status VARCHAR(2000) NOT NULL,
+ host_attributes TEXT NOT NULL,
+ ipv4 VARCHAR(255),
+ ipv6 VARCHAR(255),
+ last_registration_time NUMERIC(19) NOT NULL,
+ os_arch VARCHAR(255) NOT NULL,
+ os_info VARCHAR(1000) NOT NULL,
+ os_type VARCHAR(255) NOT NULL,
+ ph_cpu_count INTEGER,
+ public_host_name VARCHAR(255),
+ rack_info VARCHAR(255) NOT NULL,
+ total_mem NUMERIC(19) NOT NULL,
+ PRIMARY KEY (host_id));
+
+CREATE TABLE hoststate (
+ agent_version VARCHAR(255) NOT NULL,
+ available_mem NUMERIC(19) NOT NULL,
+ current_state VARCHAR(255) NOT NULL,
+ health_status VARCHAR(255),
+ host_id NUMERIC(19) NOT NULL,
+ time_in_state NUMERIC(19) NOT NULL,
+ maintenance_state VARCHAR(512),
+ PRIMARY KEY (host_id));
+
+CREATE TABLE host_version (
+ id NUMERIC(19) NOT NULL,
+ repo_version_id NUMERIC(19) NOT NULL,
+ host_id NUMERIC(19) NOT NULL,
+ state VARCHAR(32) NOT NULL,
+ PRIMARY KEY (id));
+
+CREATE TABLE servicecomponentdesiredstate (
+ component_name VARCHAR(255) NOT NULL,
+ cluster_id NUMERIC(19) NOT NULL,
+ desired_stack_id NUMERIC(19) NOT NULL,
+ desired_state VARCHAR(255) NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ PRIMARY KEY (component_name, cluster_id, service_name)
+);
+
+CREATE TABLE servicedesiredstate (
+ cluster_id NUMERIC(19) NOT NULL,
+ desired_host_role_mapping INTEGER NOT NULL,
+ desired_stack_id NUMERIC(19) NOT NULL,
+ desired_state VARCHAR(255) NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ maintenance_state VARCHAR(32) NOT NULL DEFAULT 'ACTIVE',
+ security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
+ PRIMARY KEY (cluster_id, service_name)
+);
+
+-- Accounts and group membership. ldap_user/ldap_group flag LDAP-synced
+-- principals; principal_id links into the admin authorization tables below.
+CREATE TABLE users (
+ user_id INTEGER,
+ principal_id NUMERIC(19) NOT NULL,
+ create_time TIMESTAMP DEFAULT NOW(),
+ ldap_user INTEGER NOT NULL DEFAULT 0,
+ user_name VARCHAR(255) NOT NULL,
+ user_password VARCHAR(255),
+ active INTEGER NOT NULL DEFAULT 1,
+ active_widget_layouts VARCHAR(1024) DEFAULT NULL,
+ PRIMARY KEY (user_id));
+
+CREATE TABLE groups (
+ group_id INTEGER,
+ principal_id NUMERIC(19) NOT NULL,
+ group_name VARCHAR(255) NOT NULL,
+ ldap_group INTEGER NOT NULL DEFAULT 0,
+ PRIMARY KEY (group_id));
+
+-- Join table between users and groups (uniqueness enforced further below).
+CREATE TABLE members (
+ member_id INTEGER,
+ group_id INTEGER NOT NULL,
+ user_id INTEGER NOT NULL,
+ PRIMARY KEY (member_id));
+
+-- Serialized command payload for a task; IMAGE is the binary LOB type here.
+CREATE TABLE execution_command (
+ task_id NUMERIC(19) NOT NULL,
+ command IMAGE,
+ PRIMARY KEY (task_id));
+
+-- One row per command executed on a host within a stage. Time columns are
+-- NUMERIC(19) counters (presumably epoch millis -- set by the server side).
+CREATE TABLE host_role_command (
+ task_id NUMERIC(19) NOT NULL,
+ attempt_count SMALLINT NOT NULL,
+ retry_allowed SMALLINT DEFAULT 0 NOT NULL,
+ event TEXT NOT NULL,
+ exitcode INTEGER NOT NULL,
+ host_id NUMERIC(19) NOT NULL,
+ last_attempt_time NUMERIC(19) NOT NULL,
+ request_id NUMERIC(19) NOT NULL,
+ role VARCHAR(255),
+ role_command VARCHAR(255),
+ stage_id NUMERIC(19) NOT NULL,
+ start_time NUMERIC(19) NOT NULL,
+ end_time NUMERIC(19),
+ status VARCHAR(255),
+ std_error IMAGE,
+ std_out IMAGE,
+ output_log VARCHAR(255) NULL,
+ error_log VARCHAR(255) NULL,
+ structured_out IMAGE,
+ command_detail VARCHAR(255),
+ custom_command_name VARCHAR(255),
+ PRIMARY KEY (task_id));
+
+-- Success threshold per role for a stage (success_factor is a fraction).
+CREATE TABLE role_success_criteria (
+ role VARCHAR(255) NOT NULL,
+ request_id NUMERIC(19) NOT NULL,
+ stage_id NUMERIC(19) NOT NULL,
+ success_factor FLOAT(32) NOT NULL,
+ PRIMARY KEY (role, request_id, stage_id));
+
+-- A request is divided into ordered stages; each stage groups host commands.
+CREATE TABLE stage (
+ stage_id NUMERIC(19) NOT NULL,
+ request_id NUMERIC(19) NOT NULL,
+ cluster_id NUMERIC(19),
+ skippable SMALLINT DEFAULT 0 NOT NULL,
+ log_info VARCHAR(255) NOT NULL,
+ request_context VARCHAR(255),
+ cluster_host_info IMAGE,
+ command_params IMAGE,
+ host_params IMAGE,
+ PRIMARY KEY (stage_id, request_id));
+
+-- Top-level operation request; cluster_id is nullable for non-cluster ops.
+CREATE TABLE request (
+ request_id NUMERIC(19) NOT NULL,
+ cluster_id NUMERIC(19),
+ request_schedule_id NUMERIC(19),
+ command_name VARCHAR(255),
+ create_time NUMERIC(19) NOT NULL,
+ end_time NUMERIC(19) NOT NULL,
+ exclusive_execution BIT NOT NULL DEFAULT 0,
+ inputs IMAGE,
+ request_context VARCHAR(255),
+ request_type VARCHAR(255),
+ start_time NUMERIC(19) NOT NULL,
+ status VARCHAR(255),
+ PRIMARY KEY (request_id));
+
+-- Optional service/component/host scoping filter attached to a request.
+CREATE TABLE requestresourcefilter (
+ filter_id NUMERIC(19) NOT NULL,
+ request_id NUMERIC(19) NOT NULL,
+ service_name VARCHAR(255),
+ component_name VARCHAR(255),
+ hosts IMAGE,
+ PRIMARY KEY (filter_id));
+
+-- Records the level (cluster/service/host component) a request operates at.
+CREATE TABLE requestoperationlevel (
+ operation_level_id NUMERIC(19) NOT NULL,
+ request_id NUMERIC(19) NOT NULL,
+ level_name VARCHAR(255),
+ cluster_name VARCHAR(255),
+ service_name VARCHAR(255),
+ host_component_name VARCHAR(255),
+ host_id NUMERIC(19) NULL, -- unlike most host_id columns, this one allows NULLs because the request can be at the service level
+ PRIMARY KEY (operation_level_id));
+
+-- Generic key/value store; "key"/"value" are quoted because they are
+-- reserved words.
+CREATE TABLE key_value_store ("key" VARCHAR(255),
+ "value" TEXT,
+ PRIMARY KEY ("key"));
+
+-- Tracks which config version is selected per config type and cluster.
+CREATE TABLE clusterconfigmapping (
+ type_name VARCHAR(255) NOT NULL,
+ create_timestamp NUMERIC(19) NOT NULL,
+ cluster_id NUMERIC(19) NOT NULL,
+ selected INTEGER NOT NULL DEFAULT 0,
+ version_tag VARCHAR(255) NOT NULL,
+ user_name VARCHAR(255) NOT NULL DEFAULT '_db',
+ PRIMARY KEY (type_name, create_timestamp, cluster_id));
+
+-- Host-level config overrides, analogous to clusterconfigmapping.
+CREATE TABLE hostconfigmapping (
+ create_timestamp NUMERIC(19) NOT NULL,
+ host_id NUMERIC(19) NOT NULL,
+ cluster_id NUMERIC(19) NOT NULL,
+ type_name VARCHAR(255) NOT NULL,
+ selected INTEGER NOT NULL DEFAULT 0,
+ service_name VARCHAR(255),
+ version_tag VARCHAR(255) NOT NULL,
+ user_name VARCHAR(255) NOT NULL DEFAULT '_db',
+ PRIMARY KEY (create_timestamp, host_id, cluster_id, type_name));
+
+CREATE TABLE metainfo (
+ metainfo_key VARCHAR(255),
+ metainfo_value TEXT,
+ PRIMARY KEY (metainfo_key));
+
+-- Cluster membership join table.
+CREATE TABLE ClusterHostMapping (
+ cluster_id NUMERIC(19) NOT NULL,
+ host_id NUMERIC(19) NOT NULL,
+ PRIMARY KEY (cluster_id, host_id));
+
+-- Hand-rolled id-sequence table used instead of native database sequences so
+-- the same mechanism works across all supported DB dialects.
+CREATE TABLE ambari_sequences (
+ sequence_name VARCHAR(255),
+ sequence_value NUMERIC(38) NOT NULL,
+ PRIMARY KEY (sequence_name));
+
+-- Maps a config group to specific cluster config versions.
+CREATE TABLE confgroupclusterconfigmapping (
+ config_group_id NUMERIC(19) NOT NULL,
+ cluster_id NUMERIC(19) NOT NULL,
+ config_type VARCHAR(255) NOT NULL,
+ version_tag VARCHAR(255) NOT NULL,
+ user_name VARCHAR(255) DEFAULT '_db',
+ create_timestamp NUMERIC(19) NOT NULL,
+ PRIMARY KEY(config_group_id, cluster_id, config_type));
+
+-- A named group of hosts that share config overrides.
+CREATE TABLE configgroup (
+ group_id NUMERIC(19),
+ cluster_id NUMERIC(19) NOT NULL,
+ group_name VARCHAR(255) NOT NULL,
+ tag VARCHAR(1024) NOT NULL,
+ description VARCHAR(1024),
+ create_timestamp NUMERIC(19) NOT NULL,
+ service_name VARCHAR(255),
+ PRIMARY KEY(group_id));
+
+CREATE TABLE configgrouphostmapping (
+ config_group_id NUMERIC(19) NOT NULL,
+ host_id NUMERIC(19) NOT NULL,
+ PRIMARY KEY(config_group_id, host_id));
+
+-- Cron-like schedule for batched request execution; the minutes/hours/...
+-- columns hold cron field expressions as strings.
+CREATE TABLE requestschedule (
+ schedule_id NUMERIC(19),
+ cluster_id NUMERIC(19) NOT NULL,
+ description VARCHAR(255),
+ status VARCHAR(255),
+ batch_separation_seconds smallint,
+ batch_toleration_limit smallint,
+ create_user VARCHAR(255),
+ create_timestamp NUMERIC(19),
+ update_user VARCHAR(255),
+ update_timestamp NUMERIC(19),
+ minutes VARCHAR(10),
+ hours VARCHAR(10),
+ days_of_month VARCHAR(10),
+ month VARCHAR(10),
+ day_of_week VARCHAR(10),
+ yearToSchedule VARCHAR(10),
+ startTime VARCHAR(50),
+ endTime VARCHAR(50),
+ last_execution_status VARCHAR(255),
+ PRIMARY KEY(schedule_id));
+
+-- Individual batched request issued by a schedule, with its HTTP-style
+-- request/response bookkeeping.
+CREATE TABLE requestschedulebatchrequest (
+ schedule_id NUMERIC(19),
+ batch_id NUMERIC(19),
+ request_id NUMERIC(19),
+ request_type VARCHAR(255),
+ request_uri VARCHAR(1024),
+ request_body IMAGE,
+ request_status VARCHAR(255),
+ return_code smallint,
+ return_message VARCHAR(2000),
+ PRIMARY KEY(schedule_id, batch_id));
+
+-- Blueprint definitions: a blueprint has host groups, each host group has
+-- components and optional configuration overrides.
+CREATE TABLE blueprint (
+ blueprint_name VARCHAR(255) NOT NULL,
+ stack_id NUMERIC(19) NOT NULL,
+ PRIMARY KEY(blueprint_name)
+);
+
+CREATE TABLE hostgroup (
+ blueprint_name VARCHAR(255) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ cardinality VARCHAR(255) NOT NULL,
+ PRIMARY KEY(blueprint_name, name));
+
+CREATE TABLE hostgroup_component (
+ blueprint_name VARCHAR(255) NOT NULL,
+ hostgroup_name VARCHAR(255) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ PRIMARY KEY(blueprint_name, hostgroup_name, name));
+
+-- Blueprint-level config; config_data/config_attributes are JSON text.
+CREATE TABLE blueprint_configuration (
+ blueprint_name VARCHAR(255) NOT NULL,
+ type_name VARCHAR(255) NOT NULL,
+ config_data TEXT NOT NULL,
+ config_attributes TEXT,
+ PRIMARY KEY(blueprint_name, type_name));
+
+CREATE TABLE hostgroup_configuration (
+ blueprint_name VARCHAR(255) NOT NULL,
+ hostgroup_name VARCHAR(255) NOT NULL,
+ type_name VARCHAR(255) NOT NULL,
+ config_data TEXT NOT NULL,
+ config_attributes TEXT,
+ PRIMARY KEY(blueprint_name, hostgroup_name, type_name));
+
+-- Registry of deployed view definitions (the Ambari Views framework).
+CREATE TABLE viewmain (
+ view_name VARCHAR(255) NOT NULL,
+ label VARCHAR(255),
+ description VARCHAR(2048),
+ version VARCHAR(255),
+ build VARCHAR(128),
+ resource_type_id INTEGER NOT NULL,
+ icon VARCHAR(255),
+ icon64 VARCHAR(255),
+ archive VARCHAR(255),
+ mask VARCHAR(255),
+ system_view BIT NOT NULL DEFAULT 0,
+ PRIMARY KEY(view_name));
+
+-- Per-user data stored by view instances.
+CREATE TABLE viewinstancedata (
+ view_instance_id NUMERIC(19),
+ view_name VARCHAR(255) NOT NULL,
+ view_instance_name VARCHAR(255) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ user_name VARCHAR(255) NOT NULL,
+ value VARCHAR(2000),
+ -- Lowercase identifiers to match the column definitions above and the rest
+ -- of this script; the uppercase originals only resolved because identifier
+ -- lookup is case-insensitive by default.
+ PRIMARY KEY(view_instance_id, name, user_name));
+
+-- A configured instance of a view; resource_id ties it to the authorization
+-- resource tables.
+CREATE TABLE viewinstance (
+ view_instance_id NUMERIC(19),
+ resource_id NUMERIC(19) NOT NULL,
+ view_name VARCHAR(255) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ label VARCHAR(255),
+ description VARCHAR(2048),
+ visible CHAR(1),
+ icon VARCHAR(255),
+ icon64 VARCHAR(255),
+ xml_driven CHAR(1),
+ alter_names BIT NOT NULL DEFAULT 1,
+ cluster_handle VARCHAR(255),
+ PRIMARY KEY(view_instance_id));
+
+CREATE TABLE viewinstanceproperty (
+ view_name VARCHAR(255) NOT NULL,
+ view_instance_name VARCHAR(255) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ value VARCHAR(2000),
+ PRIMARY KEY(view_name, view_instance_name, name));
+
+-- Parameters a view declares; required/masked are Y/N-style CHAR(1) flags.
+CREATE TABLE viewparameter (
+ view_name VARCHAR(255) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ description VARCHAR(2048),
+ label VARCHAR(255),
+ placeholder VARCHAR(255),
+ default_value VARCHAR(2000),
+ cluster_config VARCHAR(255),
+ required CHAR(1),
+ masked CHAR(1),
+ PRIMARY KEY(view_name, name));
+
+-- REST sub-resources exposed by a view; "resource" quoted (reserved word).
+CREATE TABLE viewresource (
+ view_name VARCHAR(255) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ plural_name VARCHAR(255),
+ id_property VARCHAR(255),
+ subResource_names VARCHAR(255),
+ provider VARCHAR(255),
+ service VARCHAR(255),
+ "resource" VARCHAR(255),
+ PRIMARY KEY(view_name, name));
+
+-- Persistence entities registered by a view instance.
+CREATE TABLE viewentity (
+ id NUMERIC(19) NOT NULL,
+ view_name VARCHAR(255) NOT NULL,
+ view_instance_name VARCHAR(255) NOT NULL,
+ class_name VARCHAR(255) NOT NULL,
+ id_property VARCHAR(255),
+ PRIMARY KEY(id));
+
+-- Authorization model: a privilege grants a permission on a resource to a
+-- principal (user or group).
+CREATE TABLE adminresourcetype (
+ resource_type_id INTEGER NOT NULL,
+ resource_type_name VARCHAR(255) NOT NULL,
+ PRIMARY KEY(resource_type_id));
+
+CREATE TABLE adminresource (
+ resource_id NUMERIC(19) NOT NULL,
+ resource_type_id INTEGER NOT NULL,
+ PRIMARY KEY(resource_id));
+
+CREATE TABLE adminprincipaltype (
+ principal_type_id INTEGER NOT NULL,
+ principal_type_name VARCHAR(255) NOT NULL,
+ PRIMARY KEY(principal_type_id));
+
+CREATE TABLE adminprincipal (
+ principal_id NUMERIC(19) NOT NULL,
+ principal_type_id INTEGER NOT NULL,
+ PRIMARY KEY(principal_id));
+
+CREATE TABLE adminpermission (
+ permission_id NUMERIC(19) NOT NULL,
+ permission_name VARCHAR(255) NOT NULL,
+ resource_type_id INTEGER NOT NULL,
+ PRIMARY KEY(permission_id));
+
+CREATE TABLE adminprivilege (
+ privilege_id NUMERIC(19),
+ permission_id NUMERIC(19) NOT NULL,
+ resource_id NUMERIC(19) NOT NULL,
+ principal_id NUMERIC(19) NOT NULL,
+ PRIMARY KEY(privilege_id));
+
+-- A registered stack repository version; repositories holds JSON text.
+CREATE TABLE repo_version (
+ repo_version_id NUMERIC(19) NOT NULL,
+ stack_id NUMERIC(19) NOT NULL,
+ version VARCHAR(255) NOT NULL,
+ display_name VARCHAR(128) NOT NULL,
+ upgrade_package VARCHAR(255) NOT NULL,
+ repositories TEXT NOT NULL,
+ PRIMARY KEY(repo_version_id)
+);
+
+-- Dashboard widget definitions and layouts.
+CREATE TABLE widget (
+ id NUMERIC(19) NOT NULL,
+ widget_name VARCHAR(255) NOT NULL,
+ widget_type VARCHAR(255) NOT NULL,
+ metrics TEXT,
+ time_created NUMERIC(19) NOT NULL,
+ author VARCHAR(255),
+ description VARCHAR(2048),
+ default_section_name VARCHAR(255),
+ scope VARCHAR(255),
+ widget_values TEXT,
+ properties TEXT,
+ cluster_id NUMERIC(19) NOT NULL,
+ PRIMARY KEY(id)
+);
+
+CREATE TABLE widget_layout (
+ id NUMERIC(19) NOT NULL,
+ layout_name VARCHAR(255) NOT NULL,
+ section_name VARCHAR(255) NOT NULL,
+ scope VARCHAR(255) NOT NULL,
+ user_name VARCHAR(255) NOT NULL,
+ display_name VARCHAR(255),
+ cluster_id NUMERIC(19) NOT NULL,
+ PRIMARY KEY(id)
+);
+
+-- Ordered placement of widgets within a layout.
+CREATE TABLE widget_layout_user_widget (
+ widget_layout_id NUMERIC(19) NOT NULL,
+ widget_id NUMERIC(19) NOT NULL,
+ widget_order smallint,
+ PRIMARY KEY(widget_layout_id, widget_id)
+);
+
+-- Generic artifact store; foreign_keys is part of the composite key so the
+-- same artifact name can exist in different parent scopes.
+CREATE TABLE artifact (
+ artifact_name VARCHAR(255) NOT NULL,
+ foreign_keys VARCHAR(255) NOT NULL,
+ artifact_data TEXT NOT NULL,
+ PRIMARY KEY(artifact_name, foreign_keys));
+
+-- Topology (blueprint provisioning) request tracking: a request has host
+-- groups, logical requests, host requests, and per-host tasks.
+CREATE TABLE topology_request (
+ id NUMERIC(19) NOT NULL,
+ action VARCHAR(255) NOT NULL,
+ cluster_name VARCHAR(100) NOT NULL,
+ bp_name VARCHAR(100) NOT NULL,
+ cluster_properties TEXT,
+ cluster_attributes TEXT,
+ description VARCHAR(1024),
+ PRIMARY KEY (id)
+);
+
+CREATE TABLE topology_hostgroup (
+ id NUMERIC(19) NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ group_properties TEXT,
+ group_attributes TEXT,
+ request_id NUMERIC(19) NOT NULL,
+ PRIMARY KEY (id)
+);
+
+-- Either a concrete fqdn, or a host_count + predicate for host selection.
+CREATE TABLE topology_host_info (
+ id NUMERIC(19) NOT NULL,
+ group_id NUMERIC(19) NOT NULL,
+ fqdn VARCHAR(255),
+ host_count INTEGER,
+ predicate VARCHAR(2048),
+ PRIMARY KEY (id)
+);
+
+CREATE TABLE topology_logical_request (
+ id NUMERIC(19) NOT NULL,
+ request_id NUMERIC(19) NOT NULL,
+ description VARCHAR(1024),
+ PRIMARY KEY (id)
+);
+
+CREATE TABLE topology_host_request (
+ id NUMERIC(19) NOT NULL,
+ logical_request_id NUMERIC(19) NOT NULL,
+ group_id NUMERIC(19) NOT NULL,
+ stage_id NUMERIC(19) NOT NULL,
+ host_name VARCHAR(255),
+ PRIMARY KEY (id)
+);
+
+CREATE TABLE topology_host_task (
+ id NUMERIC(19) NOT NULL,
+ host_request_id NUMERIC(19) NOT NULL,
+ type VARCHAR(255) NOT NULL,
+ PRIMARY KEY (id)
+);
+
+-- Links a logical task to the physical host_role_command that executes it.
+CREATE TABLE topology_logical_task (
+ id NUMERIC(19) NOT NULL,
+ host_task_id NUMERIC(19) NOT NULL,
+ physical_task_id NUMERIC(19),
+ component VARCHAR(255) NOT NULL,
+ PRIMARY KEY (id)
+);
+
+-- altering tables by creating unique constraints----------
+-- (constraint names are kept identical across all per-dialect DDL scripts)
+ALTER TABLE users ADD CONSTRAINT UNQ_users_0 UNIQUE (user_name, ldap_user);
+ALTER TABLE groups ADD CONSTRAINT UNQ_groups_0 UNIQUE (group_name, ldap_group);
+ALTER TABLE members ADD CONSTRAINT UNQ_members_0 UNIQUE (group_id, user_id);
+ALTER TABLE clusterconfig ADD CONSTRAINT UQ_config_type_tag UNIQUE (cluster_id, type_name, version_tag);
+ALTER TABLE clusterconfig ADD CONSTRAINT UQ_config_type_version UNIQUE (cluster_id, type_name, version);
+ALTER TABLE hosts ADD CONSTRAINT UQ_hosts_host_name UNIQUE (host_name);
+ALTER TABLE viewinstance ADD CONSTRAINT UQ_viewinstance_name UNIQUE (view_name, name);
+ALTER TABLE viewinstance ADD CONSTRAINT UQ_viewinstance_name_id UNIQUE (view_instance_id, view_name, name);
+ALTER TABLE serviceconfig ADD CONSTRAINT UQ_scv_service_version UNIQUE (cluster_id, service_name, version);
+ALTER TABLE adminpermission ADD CONSTRAINT UQ_perm_name_resource_type_id UNIQUE (permission_name, resource_type_id);
+ALTER TABLE repo_version ADD CONSTRAINT UQ_repo_version_display_name UNIQUE (display_name);
+ALTER TABLE repo_version ADD CONSTRAINT UQ_repo_version_stack_id UNIQUE (stack_id, version);
+ALTER TABLE stack ADD CONSTRAINT unq_stack UNIQUE (stack_name, stack_version);
+
+-- altering tables by creating foreign keys----------
+-- Note, Oracle has a limitation of 32 chars in the FK name, and we should use the same FK name in all DB types.
+-- (Some names below are vowel-stripped for that reason, e.g. hstcmpnntdesiredstatecmpnntnme.)
+ALTER TABLE members ADD CONSTRAINT FK_members_group_id FOREIGN KEY (group_id) REFERENCES groups (group_id);
+ALTER TABLE members ADD CONSTRAINT FK_members_user_id FOREIGN KEY (user_id) REFERENCES users (user_id);
+ALTER TABLE clusterconfig ADD CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterservices ADD CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterconfigmapping ADD CONSTRAINT clusterconfigmappingcluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterstate ADD CONSTRAINT FK_clusterstate_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE cluster_version ADD CONSTRAINT FK_cluster_version_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE cluster_version ADD CONSTRAINT FK_cluster_version_repovers_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id);
+ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT FK_hcdesiredstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id);
+ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT hstcmpnntdesiredstatecmpnntnme FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentstate ADD CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentstate ADD CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id);
+ALTER TABLE hoststate ADD CONSTRAINT FK_hoststate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id);
+ALTER TABLE host_version ADD CONSTRAINT FK_host_version_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id);
+ALTER TABLE host_version ADD CONSTRAINT FK_host_version_repovers_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id);
+ALTER TABLE servicecomponentdesiredstate ADD CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE servicedesiredstate ADD CONSTRAINT servicedesiredstateservicename FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE execution_command ADD CONSTRAINT FK_execution_command_task_id FOREIGN KEY (task_id) REFERENCES host_role_command (task_id);
+ALTER TABLE host_role_command ADD CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id);
+ALTER TABLE host_role_command ADD CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id);
+ALTER TABLE role_success_criteria ADD CONSTRAINT role_success_criteria_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id);
+ALTER TABLE stage ADD CONSTRAINT FK_stage_request_id FOREIGN KEY (request_id) REFERENCES request (request_id);
+ALTER TABLE request ADD CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id);
+ALTER TABLE ClusterHostMapping ADD CONSTRAINT FK_clhostmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE ClusterHostMapping ADD CONSTRAINT FK_clusterhostmapping_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id);
+ALTER TABLE hostconfigmapping ADD CONSTRAINT FK_hostconfmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE hostconfigmapping ADD CONSTRAINT FK_hostconfmapping_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id);
+ALTER TABLE serviceconfigmapping ADD CONSTRAINT FK_scvm_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id);
+ALTER TABLE serviceconfigmapping ADD CONSTRAINT FK_scvm_config FOREIGN KEY (config_id) REFERENCES clusterconfig(config_id);
+ALTER TABLE serviceconfighosts ADD CONSTRAINT FK_scvhosts_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id);
+ALTER TABLE serviceconfighosts ADD CONSTRAINT FK_scvhosts_host_id FOREIGN KEY (host_id) REFERENCES hosts(host_id);
+ALTER TABLE configgroup ADD CONSTRAINT FK_configgroup_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE confgroupclusterconfigmapping ADD CONSTRAINT FK_confg FOREIGN KEY (cluster_id, config_type, version_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag);
+ALTER TABLE confgroupclusterconfigmapping ADD CONSTRAINT FK_cgccm_gid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id);
+ALTER TABLE configgrouphostmapping ADD CONSTRAINT FK_cghm_cgid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id);
+ALTER TABLE configgrouphostmapping ADD CONSTRAINT FK_cghm_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id);
+ALTER TABLE requestschedulebatchrequest ADD CONSTRAINT FK_rsbatchrequest_schedule_id FOREIGN KEY (schedule_id) REFERENCES requestschedule (schedule_id);
+ALTER TABLE hostgroup ADD CONSTRAINT FK_hg_blueprint_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name);
+ALTER TABLE hostgroup_component ADD CONSTRAINT FK_hgc_blueprint_name FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup(blueprint_name, name);
+ALTER TABLE blueprint_configuration ADD CONSTRAINT FK_cfg_blueprint_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name);
+ALTER TABLE hostgroup_configuration ADD CONSTRAINT FK_hg_cfg_bp_hg_name FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup (blueprint_name, name);
+ALTER TABLE requestresourcefilter ADD CONSTRAINT FK_reqresfilter_req_id FOREIGN KEY (request_id) REFERENCES request (request_id);
+ALTER TABLE requestoperationlevel ADD CONSTRAINT FK_req_op_level_req_id FOREIGN KEY (request_id) REFERENCES request (request_id);
+ALTER TABLE viewparameter ADD CONSTRAINT FK_viewparam_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name);
+ALTER TABLE viewresource ADD CONSTRAINT FK_viewres_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name);
+ALTER TABLE viewinstance ADD CONSTRAINT FK_viewinst_view_name FOREIGN KEY (view_name) REFERENCES viewmain(view_name);
+ALTER TABLE viewinstanceproperty ADD CONSTRAINT FK_viewinstprop_view_name FOREIGN KEY (view_name, view_instance_name) REFERENCES viewinstance(view_name, name);
+ALTER TABLE viewinstancedata ADD CONSTRAINT FK_viewinstdata_view_name FOREIGN KEY (view_instance_id, view_name, view_instance_name) REFERENCES viewinstance(view_instance_id, view_name, name);
+ALTER TABLE viewentity ADD CONSTRAINT FK_viewentity_view_name FOREIGN KEY (view_name, view_instance_name) REFERENCES viewinstance(view_name, name);
+ALTER TABLE adminresource ADD CONSTRAINT FK_resource_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id);
+ALTER TABLE adminprincipal ADD CONSTRAINT FK_principal_principal_type_id FOREIGN KEY (principal_type_id) REFERENCES adminprincipaltype(principal_type_id);
+ALTER TABLE adminpermission ADD CONSTRAINT FK_permission_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id);
+ALTER TABLE adminprivilege ADD CONSTRAINT FK_privilege_permission_id FOREIGN KEY (permission_id) REFERENCES adminpermission(permission_id);
+ALTER TABLE adminprivilege ADD CONSTRAINT FK_privilege_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id);
+ALTER TABLE viewmain ADD CONSTRAINT FK_view_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES adminresourcetype(resource_type_id);
+ALTER TABLE viewinstance ADD CONSTRAINT FK_viewinstance_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id);
+ALTER TABLE adminprivilege ADD CONSTRAINT FK_privilege_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id);
+ALTER TABLE users ADD CONSTRAINT FK_users_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id);
+ALTER TABLE groups ADD CONSTRAINT FK_groups_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id);
+ALTER TABLE clusters ADD CONSTRAINT FK_clusters_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id);
+ALTER TABLE widget_layout_user_widget ADD CONSTRAINT FK_widget_layout_id FOREIGN KEY (widget_layout_id) REFERENCES widget_layout(id);
+ALTER TABLE widget_layout_user_widget ADD CONSTRAINT FK_widget_id FOREIGN KEY (widget_id) REFERENCES widget(id);
+ALTER TABLE topology_hostgroup ADD CONSTRAINT FK_hostgroup_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id);
+ALTER TABLE topology_host_info ADD CONSTRAINT FK_hostinfo_group_id FOREIGN KEY (group_id) REFERENCES topology_hostgroup(id);
+ALTER TABLE topology_logical_request ADD CONSTRAINT FK_logicalreq_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id);
+ALTER TABLE topology_host_request ADD CONSTRAINT FK_hostreq_logicalreq_id FOREIGN KEY (logical_request_id) REFERENCES topology_logical_request(id);
+ALTER TABLE topology_host_request ADD CONSTRAINT FK_hostreq_group_id FOREIGN KEY (group_id) REFERENCES topology_hostgroup(id);
+ALTER TABLE topology_host_task ADD CONSTRAINT FK_hosttask_req_id FOREIGN KEY (host_request_id) REFERENCES topology_host_request (id);
+ALTER TABLE topology_logical_task ADD CONSTRAINT FK_ltask_hosttask_id FOREIGN KEY (host_task_id) REFERENCES topology_host_task (id);
+ALTER TABLE topology_logical_task ADD CONSTRAINT FK_ltask_hrc_id FOREIGN KEY (physical_task_id) REFERENCES host_role_command (task_id);
+ALTER TABLE clusters ADD CONSTRAINT FK_clusters_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id);
+ALTER TABLE clusterconfig ADD CONSTRAINT FK_clusterconfig_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id);
+ALTER TABLE serviceconfig ADD CONSTRAINT FK_serviceconfig_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id);
+ALTER TABLE clusterstate ADD CONSTRAINT FK_cs_current_stack_id FOREIGN KEY (current_stack_id) REFERENCES stack(stack_id);
+ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT FK_hcds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id);
+ALTER TABLE hostcomponentstate ADD CONSTRAINT FK_hcs_current_stack_id FOREIGN KEY (current_stack_id) REFERENCES stack(stack_id);
+ALTER TABLE servicecomponentdesiredstate ADD CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id);
+ALTER TABLE servicedesiredstate ADD CONSTRAINT FK_sds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id);
+ALTER TABLE blueprint ADD CONSTRAINT FK_blueprint_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id);
+ALTER TABLE repo_version ADD CONSTRAINT FK_repoversion_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id);
+
+-- Kerberos
+-- Principals managed by Ambari and the hosts they are distributed to.
+CREATE TABLE kerberos_principal (
+ principal_name VARCHAR(255) NOT NULL,
+ is_service SMALLINT NOT NULL DEFAULT 1,
+ cached_keytab_path VARCHAR(255),
+ PRIMARY KEY(principal_name)
+);
+
+CREATE TABLE kerberos_principal_host (
+ principal_name VARCHAR(255) NOT NULL,
+ host_id NUMERIC(19) NOT NULL,
+ PRIMARY KEY(principal_name, host_id)
+);
+
+ALTER TABLE kerberos_principal_host ADD CONSTRAINT FK_krb_pr_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id);
+ALTER TABLE kerberos_principal_host ADD CONSTRAINT FK_krb_pr_host_principalname FOREIGN KEY (principal_name) REFERENCES kerberos_principal (principal_name);
+-- Kerberos (end)
+
+-- Alerting Framework
+-- Alert definition: what to check, how often, and from what source.
+CREATE TABLE alert_definition (
+ definition_id NUMERIC(19) NOT NULL,
+ cluster_id NUMERIC(19) NOT NULL,
+ definition_name VARCHAR(255) NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ component_name VARCHAR(255),
+ scope VARCHAR(255) DEFAULT 'ANY' NOT NULL,
+ label VARCHAR(255),
+ description TEXT,
+ enabled SMALLINT DEFAULT 1 NOT NULL,
+ schedule_interval INTEGER NOT NULL,
+ source_type VARCHAR(255) NOT NULL,
+ alert_source TEXT NOT NULL,
+ hash VARCHAR(64) NOT NULL,
+ ignore_host SMALLINT DEFAULT 0 NOT NULL,
+ PRIMARY KEY (definition_id),
+ FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
+ CONSTRAINT uni_alert_def_name UNIQUE(cluster_id,definition_name)
+);
+
+-- Full history of alert occurrences.
+CREATE TABLE alert_history (
+ alert_id NUMERIC(19) NOT NULL,
+ cluster_id NUMERIC(19) NOT NULL,
+ alert_definition_id NUMERIC(19) NOT NULL,
+ service_name VARCHAR(255) NOT NULL,
+ component_name VARCHAR(255),
+ host_name VARCHAR(255),
+ alert_instance VARCHAR(255),
+ alert_timestamp NUMERIC(19) NOT NULL,
+ alert_label VARCHAR(1024),
+ alert_state VARCHAR(255) NOT NULL,
+ alert_text TEXT,
+ PRIMARY KEY (alert_id),
+ FOREIGN KEY (alert_definition_id) REFERENCES alert_definition(definition_id),
+ FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id)
+);
+
+-- Latest state per alert; history_id points at the most recent history row.
+CREATE TABLE alert_current (
+ alert_id NUMERIC(19) NOT NULL,
+ definition_id NUMERIC(19) NOT NULL,
+ history_id NUMERIC(19) NOT NULL UNIQUE,
+ maintenance_state VARCHAR(255) NOT NULL,
+ original_timestamp NUMERIC(19) NOT NULL,
+ latest_timestamp NUMERIC(19) NOT NULL,
+ latest_text TEXT,
+ PRIMARY KEY (alert_id),
+ FOREIGN KEY (definition_id) REFERENCES alert_definition(definition_id),
+ FOREIGN KEY (history_id) REFERENCES alert_history(alert_id)
+);
+
+-- Alert groups bundle definitions for notification routing.
+CREATE TABLE alert_group (
+ group_id NUMERIC(19) NOT NULL,
+ cluster_id NUMERIC(19) NOT NULL,
+ group_name VARCHAR(255) NOT NULL,
+ is_default SMALLINT NOT NULL DEFAULT 0,
+ service_name VARCHAR(255),
+ PRIMARY KEY (group_id),
+ CONSTRAINT uni_alert_group_name UNIQUE(cluster_id,group_name)
+);
+
+-- Notification targets (e.g. email/SNMP endpoints); properties is JSON text.
+CREATE TABLE alert_target (
+ target_id NUMERIC(19) NOT NULL,
+ target_name VARCHAR(255) NOT NULL UNIQUE,
+ notification_type VARCHAR(64) NOT NULL,
+ properties TEXT,
+ description VARCHAR(1024),
+ is_global SMALLINT NOT NULL DEFAULT 0,
+ PRIMARY KEY (target_id)
+);
+
+-- Which alert states a target is interested in (no PK: multi-row per target).
+CREATE TABLE alert_target_states (
+ target_id NUMERIC(19) NOT NULL,
+ alert_state VARCHAR(255) NOT NULL,
+ FOREIGN KEY (target_id) REFERENCES alert_target(target_id)
+);
+
+CREATE TABLE alert_group_target (
+ group_id NUMERIC(19) NOT NULL,
+ target_id NUMERIC(19) NOT NULL,
+ PRIMARY KEY (group_id, target_id),
+ FOREIGN KEY (group_id) REFERENCES alert_group(group_id),
+ FOREIGN KEY (target_id) REFERENCES alert_target(target_id)
+);
+
+CREATE TABLE alert_grouping (
+ definition_id NUMERIC(19) NOT NULL,
+ group_id NUMERIC(19) NOT NULL,
+ PRIMARY KEY (group_id, definition_id),
+ FOREIGN KEY (definition_id) REFERENCES alert_definition(definition_id),
+ FOREIGN KEY (group_id) REFERENCES alert_group(group_id)
+);
+
+-- A pending/sent notification for a history entry.
+CREATE TABLE alert_notice (
+ notification_id NUMERIC(19) NOT NULL,
+ target_id NUMERIC(19) NOT NULL,
+ history_id NUMERIC(19) NOT NULL,
+ notify_state VARCHAR(255) NOT NULL,
+ uuid VARCHAR(64) NOT NULL UNIQUE,
+ PRIMARY KEY (notification_id),
+ FOREIGN KEY (target_id) REFERENCES alert_target(target_id),
+ FOREIGN KEY (history_id) REFERENCES alert_history(alert_id)
+);
+
+-- Indexes covering the common alert query patterns.
+CREATE INDEX idx_alert_history_def_id on alert_history(alert_definition_id);
+CREATE INDEX idx_alert_history_service on alert_history(service_name);
+CREATE INDEX idx_alert_history_host on alert_history(host_name);
+CREATE INDEX idx_alert_history_time on alert_history(alert_timestamp);
+CREATE INDEX idx_alert_history_state on alert_history(alert_state);
+CREATE INDEX idx_alert_group_name on alert_group(group_name);
+CREATE INDEX idx_alert_notice_state on alert_notice(notify_state);
+
+-- upgrade tables
+-- An upgrade (or downgrade, per 'direction') tied to the request executing it.
+CREATE TABLE upgrade (
+ upgrade_id NUMERIC(19) NOT NULL,
+ cluster_id NUMERIC(19) NOT NULL,
+ request_id NUMERIC(19) NOT NULL,
+ from_version VARCHAR(255) DEFAULT '' NOT NULL,
+ to_version VARCHAR(255) DEFAULT '' NOT NULL,
+ direction VARCHAR(255) DEFAULT 'UPGRADE' NOT NULL,
+ PRIMARY KEY (upgrade_id),
+ FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
+ FOREIGN KEY (request_id) REFERENCES request(request_id)
+);
+
+CREATE TABLE upgrade_group (
+ upgrade_group_id NUMERIC(19) NOT NULL,
+ upgrade_id NUMERIC(19) NOT NULL,
+ group_name VARCHAR(255) DEFAULT '' NOT NULL,
+ group_title VARCHAR(1024) DEFAULT '' NOT NULL,
+ PRIMARY KEY (upgrade_group_id),
+ FOREIGN KEY (upgrade_id) REFERENCES upgrade(upgrade_id)
+);
+
+-- One executable item within an upgrade group; hosts/tasks hold JSON text.
+CREATE TABLE upgrade_item (
+ upgrade_item_id NUMERIC(19) NOT NULL,
+ upgrade_group_id NUMERIC(19) NOT NULL,
+ stage_id NUMERIC(19) NOT NULL,
+ state VARCHAR(255) DEFAULT 'NONE' NOT NULL,
+ hosts TEXT,
+ tasks TEXT,
+ item_text VARCHAR(1024),
+ PRIMARY KEY (upgrade_item_id),
+ FOREIGN KEY (upgrade_group_id) REFERENCES upgrade_group(upgrade_group_id)
+);
+
+-- In order for the first ID to be 1, must initialize the ambari_sequences table with a sequence_value of 0.
+-- Sequences whose tables receive seed rows further below start ABOVE the highest
+-- pre-inserted ID so the first generated value does not collide with seed data
+-- (e.g. user_id_seq = 2 because the 'admin' user is inserted with user_id = 1,
+-- resource_type_id_seq = 4 because resource types 1-3 are seeded).
+-- NOTE(review): cluster_id_seq starts at 1 with no seeded cluster row — presumably
+-- kept consistent with the other Ambari DDL scripts; confirm against them.
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('cluster_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('host_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('host_role_command_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('user_id_seq', 2);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('group_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('member_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('configgroup_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('requestschedule_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('resourcefilter_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('viewentity_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('operation_level_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('view_instance_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('resource_type_id_seq', 4);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('resource_id_seq', 2);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('principal_type_id_seq', 3);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('principal_id_seq', 2);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('permission_id_seq', 5);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('privilege_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('config_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('cluster_version_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('host_version_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('service_config_id_seq', 1);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('alert_definition_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('alert_group_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('alert_target_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('alert_history_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('alert_notice_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('alert_current_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('repo_version_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('upgrade_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('upgrade_group_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('upgrade_item_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('stack_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('widget_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('widget_layout_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('topology_host_info_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('topology_host_request_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('topology_host_task_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('topology_logical_request_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('topology_logical_task_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('topology_request_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('topology_host_group_id_seq', 0);
+
+-- Seed data for the authorization model: resource types, the root AMBARI
+-- resource, principal types, the admin principal/user, built-in permissions,
+-- and the admin privilege. The explicit IDs here must stay below the
+-- corresponding *_id_seq values initialized above.
+-- SELECT ... UNION ALL is used instead of multi-row VALUES for multi-row inserts.
+insert into adminresourcetype (resource_type_id, resource_type_name)
+ select 1, 'AMBARI'
+ union all
+ select 2, 'CLUSTER'
+ union all
+ select 3, 'VIEW';
+
+-- The single root resource (id 1) of type AMBARI.
+insert into adminresource (resource_id, resource_type_id)
+ select 1, 1;
+
+insert into adminprincipaltype (principal_type_id, principal_type_name)
+ select 1, 'USER'
+ union all
+ select 2, 'GROUP';
+
+-- Principal 1 (type USER) backs the built-in admin account below.
+insert into adminprincipal (principal_id, principal_type_id)
+ select 1, 1;
+
+-- Built-in 'admin' user; the value is the stored password hash for the
+-- default password (salted hash format used by Ambari's authentication).
+insert into users(user_id, principal_id, user_name, user_password)
+ select 1, 1, 'admin','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00';
+
+-- Built-in permissions, each bound to the resource type it applies to.
+insert into adminpermission(permission_id, permission_name, resource_type_id)
+ select 1, 'AMBARI.ADMIN', 1
+ union all
+ select 2, 'CLUSTER.READ', 2
+ union all
+ select 3, 'CLUSTER.OPERATE', 2
+ union all
+ select 4, 'VIEW.USE', 3;
+
+-- Grant AMBARI.ADMIN on the root resource to the admin principal.
+insert into adminprivilege (privilege_id, permission_id, resource_id, principal_id)
+ select 1, 1, 1, 1;
+
+-- Record the Ambari version; ${ambariVersion} is substituted at build time.
+insert into metainfo(metainfo_key, metainfo_value)
+ select 'version','${ambariVersion}';
+
+-- Quartz tables
+-- Schema for the Quartz scheduler's JDBC job store (used by Ambari's request
+-- scheduler). Table and column layout follows the standard Quartz 2.x DDL;
+-- IMAGE is used for the binary (serialized JobDataMap/calendar) columns —
+-- presumably the SQL Anywhere/Sybase equivalent of BLOB here; confirm against
+-- the SQL Anywhere type reference.
+
+CREATE TABLE QRTZ_JOB_DETAILS
+(
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ JOB_NAME VARCHAR(200) NOT NULL,
+ JOB_GROUP VARCHAR(200) NOT NULL,
+ DESCRIPTION VARCHAR(250) NULL,
+ JOB_CLASS_NAME VARCHAR(250) NOT NULL,
+ IS_DURABLE VARCHAR(1) NOT NULL,
+ IS_NONCONCURRENT VARCHAR(1) NOT NULL,
+ IS_UPDATE_DATA VARCHAR(1) NOT NULL,
+ REQUESTS_RECOVERY VARCHAR(1) NOT NULL,
+ JOB_DATA IMAGE NULL,
+ PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
+);
+
+-- Triggers reference their job via the composite FK to QRTZ_JOB_DETAILS.
+CREATE TABLE QRTZ_TRIGGERS
+(
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ JOB_NAME VARCHAR(200) NOT NULL,
+ JOB_GROUP VARCHAR(200) NOT NULL,
+ DESCRIPTION VARCHAR(250) NULL,
+ NEXT_FIRE_TIME NUMERIC(13) NULL,
+ PREV_FIRE_TIME NUMERIC(13) NULL,
+ PRIORITY INTEGER NULL,
+ TRIGGER_STATE VARCHAR(16) NOT NULL,
+ TRIGGER_TYPE VARCHAR(8) NOT NULL,
+ START_TIME NUMERIC(13) NOT NULL,
+ END_TIME NUMERIC(13) NULL,
+ CALENDAR_NAME VARCHAR(200) NULL,
+ MISFIRE_INSTR SMALLINT NULL,
+ JOB_DATA IMAGE NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
+ REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP)
+);
+
+-- Per-trigger-type detail tables; each shares QRTZ_TRIGGERS' composite key.
+CREATE TABLE QRTZ_SIMPLE_TRIGGERS
+(
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ REPEAT_COUNT NUMERIC(7) NOT NULL,
+ REPEAT_INTERVAL NUMERIC(12) NOT NULL,
+ TIMES_TRIGGERED NUMERIC(10) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE QRTZ_CRON_TRIGGERS
+(
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ CRON_EXPRESSION VARCHAR(200) NOT NULL,
+ TIME_ZONE_ID VARCHAR(80),
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE QRTZ_SIMPROP_TRIGGERS
+(
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ STR_PROP_1 VARCHAR(512) NULL,
+ STR_PROP_2 VARCHAR(512) NULL,
+ STR_PROP_3 VARCHAR(512) NULL,
+ INT_PROP_1 INTEGER NULL,
+ INT_PROP_2 INTEGER NULL,
+ LONG_PROP_1 NUMERIC(19) NULL,
+ LONG_PROP_2 NUMERIC(19) NULL,
+ DEC_PROP_1 NUMERIC(13,4) NULL,
+ DEC_PROP_2 NUMERIC(13,4) NULL,
+ BOOL_PROP_1 VARCHAR(1) NULL,
+ BOOL_PROP_2 VARCHAR(1) NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE QRTZ_BLOB_TRIGGERS
+(
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ BLOB_DATA IMAGE NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
+ FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+ REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
+);
+
+CREATE TABLE QRTZ_CALENDARS
+(
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ CALENDAR_NAME VARCHAR(200) NOT NULL,
+ CALENDAR IMAGE NOT NULL,
+ PRIMARY KEY (SCHED_NAME,CALENDAR_NAME)
+);
+
+CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS
+(
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP)
+);
+
+-- Runtime state tables used for firing bookkeeping and clustering.
+CREATE TABLE QRTZ_FIRED_TRIGGERS
+(
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ ENTRY_ID VARCHAR(95) NOT NULL,
+ TRIGGER_NAME VARCHAR(200) NOT NULL,
+ TRIGGER_GROUP VARCHAR(200) NOT NULL,
+ INSTANCE_NAME VARCHAR(200) NOT NULL,
+ FIRED_TIME NUMERIC(19) NOT NULL,
+ SCHED_TIME NUMERIC(19) NOT NULL,
+ PRIORITY INTEGER NOT NULL,
+ STATE VARCHAR(16) NOT NULL,
+ JOB_NAME VARCHAR(200) NULL,
+ JOB_GROUP VARCHAR(200) NULL,
+ IS_NONCONCURRENT VARCHAR(1) NULL,
+ REQUESTS_RECOVERY VARCHAR(1) NULL,
+ PRIMARY KEY (SCHED_NAME,ENTRY_ID)
+);
+
+CREATE TABLE QRTZ_SCHEDULER_STATE
+(
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ INSTANCE_NAME VARCHAR(200) NOT NULL,
+ LAST_CHECKIN_TIME NUMERIC(19) NOT NULL,
+ CHECKIN_INTERVAL NUMERIC(19) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,INSTANCE_NAME)
+);
+
+CREATE TABLE QRTZ_LOCKS
+(
+ SCHED_NAME VARCHAR(120) NOT NULL,
+ LOCK_NAME VARCHAR(40) NOT NULL,
+ PRIMARY KEY (SCHED_NAME,LOCK_NAME)
+);
+
+-- Secondary indexes recommended by the Quartz JDBC job store to speed up
+-- trigger acquisition, misfire scanning, and recovery queries.
+create index idx_qrtz_j_req_recovery on QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_j_grp on QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
+
+create index idx_qrtz_t_j on QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_t_jg on QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_t_c on QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
+create index idx_qrtz_t_g on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
+create index idx_qrtz_t_state on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
+create index idx_qrtz_t_n_state on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_n_g_state on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
+create index idx_qrtz_t_next_fire_time on QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st on QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_misfire on QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
+create index idx_qrtz_t_nft_st_misfire on QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
+create index idx_qrtz_t_nft_st_misfire_grp on QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
+
+create index idx_qrtz_ft_trig_inst_name on QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
+create index idx_qrtz_ft_inst_job_req_rcvry on QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
+create index idx_qrtz_ft_j_g on QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
+create index idx_qrtz_ft_jg on QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
+create index idx_qrtz_ft_t_g on QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
+create index idx_qrtz_ft_tg on QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
+
+-- Persist everything created so far before the job-history tables below.
+commit;
+
+-- MapReduce job-history tables: workflow -> job -> task -> taskAttempt form a
+-- hierarchy linked by ON DELETE CASCADE foreign keys, so removing a workflow
+-- removes its jobs, tasks, and attempts. Timestamps and byte counts are stored
+-- as NUMERIC(19) (epoch millis / long values).
+CREATE TABLE workflow (
+ workflowId VARCHAR(255), workflowName TEXT,
+ parentWorkflowId VARCHAR(255),
+ workflowContext TEXT, userName TEXT,
+ startTime NUMERIC(19), lastUpdateTime NUMERIC(19),
+ numJobsTotal INTEGER, numJobsCompleted INTEGER,
+ inputBytes NUMERIC(19), outputBytes NUMERIC(19),
+ duration NUMERIC(19),
+ PRIMARY KEY (workflowId),
+ -- Self-referencing FK: workflows may nest under a parent workflow.
+ FOREIGN KEY (parentWorkflowId) REFERENCES workflow(workflowId) ON DELETE CASCADE
+);
+
+CREATE TABLE job (
+ jobId VARCHAR(255), workflowId VARCHAR(255), jobName TEXT, workflowEntityName TEXT,
+ userName TEXT, queue TEXT, acls TEXT, confPath TEXT,
+ submitTime NUMERIC(19), launchTime NUMERIC(19), finishTime NUMERIC(19),
+ maps INTEGER, reduces INTEGER, status TEXT, priority TEXT,
+ finishedMaps INTEGER, finishedReduces INTEGER,
+ failedMaps INTEGER, failedReduces INTEGER,
+ mapsRuntime NUMERIC(19), reducesRuntime NUMERIC(19),
+ mapCounters TEXT, reduceCounters TEXT, jobCounters TEXT,
+ inputBytes NUMERIC(19), outputBytes NUMERIC(19),
+ PRIMARY KEY(jobId),
+ FOREIGN KEY(workflowId) REFERENCES workflow(workflowId) ON DELETE CASCADE
+);
+
+CREATE TABLE task (
+ taskId VARCHAR(255), jobId VARCHAR(255), taskType TEXT, splits TEXT,
+ startTime NUMERIC(19), finishTime NUMERIC(19), status TEXT, error TEXT, counters TEXT,
+ failedAttempt TEXT,
+ PRIMARY KEY(taskId),
+ FOREIGN KEY(jobId) REFERENCES job(jobId) ON DELETE CASCADE
+);
+
+CREATE TABLE taskAttempt (
+ taskAttemptId VARCHAR(255), taskId VARCHAR(255), jobId VARCHAR(255), taskType TEXT, taskTracker TEXT,
+ startTime NUMERIC(19), finishTime NUMERIC(19),
+ mapFinishTime NUMERIC(19), shuffleFinishTime NUMERIC(19), sortFinishTime NUMERIC(19),
+ locality TEXT, avataar TEXT,
+ status TEXT, error TEXT, counters TEXT,
+ inputBytes NUMERIC(19), outputBytes NUMERIC(19),
+ PRIMARY KEY(taskAttemptId),
+ FOREIGN KEY(jobId) REFERENCES job(jobId) ON DELETE CASCADE,
+ FOREIGN KEY(taskId) REFERENCES task(taskId) ON DELETE CASCADE
+);
+
+-- Append-only event logs; no primary keys or foreign keys by design.
+CREATE TABLE hdfsEvent (
+ timestamp NUMERIC(19),
+ userName TEXT,
+ clientIP TEXT,
+ operation TEXT,
+ srcPath TEXT,
+ dstPath TEXT,
+ permissions TEXT
+);
+
+CREATE TABLE mapreduceEvent (
+ timestamp NUMERIC(19),
+ userName TEXT,
+ clientIP TEXT,
+ operation TEXT,
+ target TEXT,
+ result TEXT,
+ description TEXT,
+ permissions TEXT
+);
+
+CREATE TABLE clusterEvent (
+ timestamp NUMERIC(19),
+ service TEXT, status TEXT,
+ error TEXT, data TEXT ,
+ host TEXT, rack TEXT
+);
http://git-wip-us.apache.org/repos/asf/ambari/blob/49c7e74b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-DROP.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-DROP.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-DROP.sql
new file mode 100644
index 0000000..e69de29
http://git-wip-us.apache.org/repos/asf/ambari/blob/49c7e74b/ambari-server/src/test/python/TestAmbariServer.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index 12efdb6..d6b7c93 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -929,7 +929,7 @@ class TestAmbariServer(TestCase):
@patch("ambari_server.serverConfiguration.print_info_msg")
def test_get_share_jars(self, printInfoMsg_mock, globMock):
globMock.return_value = ["one", "two"]
- expected = "one:two:one:two:one:two"
+ expected = "one:two:one:two:one:two:one:two"
result = get_share_jars()
self.assertEqual(expected, result)
globMock.return_value = []
@@ -4190,7 +4190,7 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
get_ambari_properties_mock.return_value = properties
get_ambari_properties_3_mock.side_effect = get_ambari_properties_2_mock.side_effect = [properties, properties2, properties2]
- isfile_mock.side_effect = [False, True, False]
+ isfile_mock.side_effect = [False, True, False, False]
try:
upgrade(args)
@@ -4402,7 +4402,7 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
get_ambari_properties_mock.return_value = props
exists_mock.return_value = True
lexists_mock.return_value = True
- isfile_mock.side_effect = [True, False, False]
+ isfile_mock.side_effect = [True, False, False, False]
upgrade(args)
self.assertTrue(os_remove_mock.called)
@@ -5129,7 +5129,7 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
isdir_mock.return_value = True
- isfile_mock.side_effect = [True, False, False]
+ isfile_mock.side_effect = [True, False, False, False]
del args.database_index
del args.persistence_type
@@ -5151,7 +5151,7 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
get_ambari_properties_mock.reset_mock()
os_symlink_mock.reset_mock()
- isfile_mock.side_effect = [False, False, False]
+ isfile_mock.side_effect = [False, False, False, False]
check_jdbc_drivers(args)