You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by ma...@apache.org on 2013/06/08 22:37:04 UTC
svn commit: r1491057 [1/2] - in
/incubator/ambari/branches/branch-1.2.4/ambari-server: ./ conf/unix/ sbin/
src/main/python/ src/main/resources/ src/test/python/
Author: mahadev
Date: Sat Jun 8 20:36:54 2013
New Revision: 1491057
URL: http://svn.apache.org/r1491057
Log:
AMBARI-2331. Ambari Database setup process needs cleanup. (Myroslav Papirkovskyy via mahadev)
Added:
incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-MySQL-DROP.sql
incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-Oracle-DROP.sql
incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-Postgres-REMOTE-CREATE.sql
incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-Postgres-REMOTE-DROP.sql
Removed:
incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/mysql-ddl.sql
incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/oracle-DDL.sql
Modified:
incubator/ambari/branches/branch-1.2.4/ambari-server/conf/unix/ambari.properties
incubator/ambari/branches/branch-1.2.4/ambari-server/pom.xml
incubator/ambari/branches/branch-1.2.4/ambari-server/sbin/ambari-server
incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/python/ambari-server.py
incubator/ambari/branches/branch-1.2.4/ambari-server/src/test/python/TestAmbaryServer.py
Modified: incubator/ambari/branches/branch-1.2.4/ambari-server/conf/unix/ambari.properties
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2.4/ambari-server/conf/unix/ambari.properties?rev=1491057&r1=1491056&r2=1491057&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2.4/ambari-server/conf/unix/ambari.properties (original)
+++ incubator/ambari/branches/branch-1.2.4/ambari-server/conf/unix/ambari.properties Sat Jun 8 20:36:54 2013
@@ -26,6 +26,5 @@ webapp.dir=/usr/lib/ambari-server/web
bootstrap.dir=/var/run/ambari-server/bootstrap
bootstrap.script=/usr/lib/python2.6/site-packages/ambari_server/bootstrap.py
bootstrap.setup_agent.script=/usr/lib/python2.6/site-packages/ambari_server/setupAgent.py
-server.persistence.inMemory=false
api.authenticate=true
server.connection.max.idle.millis=900000
Modified: incubator/ambari/branches/branch-1.2.4/ambari-server/pom.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2.4/ambari-server/pom.xml?rev=1491057&r1=1491056&r2=1491057&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2.4/ambari-server/pom.xml (original)
+++ incubator/ambari/branches/branch-1.2.4/ambari-server/pom.xml Sat Jun 8 20:36:54 2013
@@ -260,6 +260,24 @@
<source>
<location>src/main/resources/Ambari-DDL-Postgres-DROP.sql</location>
</source>
+ <source>
+ <location>src/main/resources/Ambari-DDL-Postgres-REMOTE-CREATE.sql</location>
+ </source>
+ <source>
+ <location>src/main/resources/Ambari-DDL-Postgres-REMOTE-DROP.sql</location>
+ </source>
+ <source>
+ <location>src/main/resources/Ambari-DDL-Oracle-CREATE.sql</location>
+ </source>
+ <source>
+ <location>src/main/resources/Ambari-DDL-MySQL-CREATE.sql</location>
+ </source>
+ <source>
+ <location>src/main/resources/Ambari-DDL-Oracle-DROP.sql</location>
+ </source>
+ <source>
+ <location>src/main/resources/Ambari-DDL-MySQL-DROP.sql</location>
+ </source>
</sources>
</mapping>
<mapping>
Modified: incubator/ambari/branches/branch-1.2.4/ambari-server/sbin/ambari-server
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2.4/ambari-server/sbin/ambari-server?rev=1491057&r1=1491056&r2=1491057&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2.4/ambari-server/sbin/ambari-server (original)
+++ incubator/ambari/branches/branch-1.2.4/ambari-server/sbin/ambari-server Sat Jun 8 20:36:54 2013
@@ -86,13 +86,6 @@ case "$1" in
$PYTHON /usr/sbin/ambari-server.py $@
;;
setup)
- echo -e "Run postgresql initdb"
- initdb_res=`/sbin/service postgresql initdb`
- if [ "0" == "$?" ]; then
- echo -e "${initdb_res}"
- fi
- echo -e "Run postgresql start"
- /sbin/service postgresql start
echo -e "Setup ambari-server"
$PYTHON /usr/sbin/ambari-server.py $@
;;
Modified: incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/python/ambari-server.py
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/python/ambari-server.py?rev=1491057&r1=1491056&r2=1491057&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/python/ambari-server.py (original)
+++ incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/python/ambari-server.py Sat Jun 8 20:36:54 2013
@@ -37,7 +37,6 @@ import getpass
# debug settings
VERBOSE = False
SILENT = False
-REMOTE_DATABASE = False
SERVER_START_DEBUG = False
# action commands
@@ -99,12 +98,14 @@ JAVA_HOME="JAVA_HOME"
PID_DIR="/var/run/ambari-server"
PID_NAME="ambari-server.pid"
AMBARI_PROPERTIES_FILE="ambari.properties"
+AMBARI_PROPERTIES_RPMSAVE_FILE="ambari.properties.rpmsave"
SETUP_DB_CMD = ['su', '-', 'postgres',
'--command=psql -f {0} -v username=\'"{1}"\' -v password="\'{2}\'"']
UPGRADE_STACK_CMD = ['su', 'postgres',
'--command=psql -f {0} -v stack_name="\'{1}\'" -v stack_version="\'{2}\'"']
PG_ST_CMD = "/sbin/service postgresql status"
+PG_INITDB_CMD = "/sbin/service postgresql initdb"
PG_START_CMD = "/sbin/service postgresql start"
PG_RESTART_CMD = "/sbin/service postgresql restart"
PG_STATUS_RUNNING = "running"
@@ -114,6 +115,12 @@ PG_HBA_CONF_FILE_BACKUP = PG_HBA_DIR + "
POSTGRESQL_CONF_FILE = PG_HBA_DIR + "postgresql.conf"
PG_HBA_RELOAD_CMD = "su postgres --command='pg_ctl -D {0} reload'"
PG_DEFAULT_PASSWORD = "bigdata"
+
+JDBC_DATABASE_PROPERTY = "server.jdbc.database"
+JDBC_HOSTNAME_PROPERTY = "server.jdbc.hostname"
+JDBC_PORT_PROPERTY = "server.jdbc.port"
+JDBC_SCHEMA_PROPERTY = "server.jdbc.schema"
+
JDBC_USER_NAME_PROPERTY = "server.jdbc.user.name"
JDBC_PASSWORD_FILE_PROPERTY = "server.jdbc.user.passwd"
JDBC_PASSWORD_FILENAME = "password.dat"
@@ -123,13 +130,45 @@ PERSISTENCE_TYPE_PROPERTY = "server.pers
JDBC_DRIVER_PROPERTY = "server.jdbc.driver"
JDBC_URL_PROPERTY = "server.jdbc.url"
+JDBC_RCA_DATABASE_PROPERTY = "server.jdbc.database"
+JDBC_RCA_HOSTNAME_PROPERTY = "server.jdbc.hostname"
+JDBC_RCA_PORT_PROPERTY = "server.jdbc.port"
+JDBC_RCA_SCHEMA_PROPERTY = "server.jdbc.schema"
+
JDBC_RCA_DRIVER_PROPERTY = "server.jdbc.rca.driver"
JDBC_RCA_URL_PROPERTY = "server.jdbc.rca.url"
JDBC_RCA_USER_NAME_PROPERTY = "server.jdbc.rca.user.name"
JDBC_RCA_PASSWORD_FILE_PROPERTY = "server.jdbc.rca.user.passwd"
-DRIVER_NAMES = ["org.postgresql.Driver", "oracle.jdbc.driver.OracleDriver", "com.mysql.jdbc.Driver"]
-CONNECTION_STRINGS = ["jdbc:postgresql://{0}:{1}/{2}", "jdbc:oracle:thin:@{0}:{1}/{2}", "jdbc:mysql://{0}:{1}/{2}"]
+CHECK_COMMAND_EXIST_CMD = "type {0}"
+
+DATABASE_INDEX = 0
+PROMPT_DATABASE_OPTIONS = False
+USERNAME_PATTERN = "^[a-zA-Z_][a-zA-Z0-9_\-]*$"
+PASSWORD_PATTERN = "^[a-zA-Z0-9_-]*$"
+DATABASE_NAMES =["postgres", "oracle", "mysql"]
+DATABASE_STORAGE_NAMES =["database","service","schema"]
+DATABASE_PORTS =["5432", "1521", "3306"]
+DATABASE_DRIVER_NAMES = ["org.postgresql.Driver", "oracle.jdbc.driver.OracleDriver", "com.mysql.jdbc.Driver"]
+DATABASE_CONNECTION_STRINGS = ["jdbc:postgresql://{0}:{1}/{2}", "jdbc:oracle:thin:@{0}:{1}/{2}", "jdbc:mysql://{0}:{1}/{2}"]
+DATABASE_CLI_TOOLS = [["psql"], ["sqlplus", "sqlplus64"], ["mysql"]]
+DATABASE_INIT_SCRIPTS = ['/var/lib/ambari-server/resources/Ambari-DDL-Postgres-REMOTE-CREATE.sql',
+ '/var/lib/ambari-server/resources/Ambari-DDL-Oracle-CREATE.sql',
+ '/var/lib/ambari-server/resources/Ambari-DDL-MySQL-CREATE.sql']
+DATABASE_DROP_SCRIPTS = ['/var/lib/ambari-server/resources/Ambari-DDL-Postgres-REMOTE-DROP.sql',
+ '/var/lib/ambari-server/resources/Ambari-DDL-Oracle-DROP.sql',
+ '/var/lib/ambari-server/resources/Ambari-DDL-MySQL-DROP.sql']
+DATABASE_URL_REGEX = ["jdbc:postgresql://([a-zA-Z0-9._]+):(\d+)/(.+)",
+ "jdbc:oracle:thin:@([a-zA-Z0-9._]+):(\d+)/(.+)",
+ "jdbc:mysql://([a-zA-Z0-9._]+):(\d*)/(.+)"]
+
+REGEX_IP_ADDRESS = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
+REGEX_HOSTNAME = "^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$"
+
+POSTGRES_EXEC_ARGS = "-h {0} -p {1} -d {2} -U {3} -f {4} -v username='\"{3}\"'"
+ORACLE_EXEC_ARGS = "-S '{0}/{1}@(description=(address=(protocol=TCP)(host={2})(port={3}))(connect_data=(sid={4})))' @{5} {0}"
+MYSQL_EXEC_ARGS = "--host={0} --port={1} --user={2} --password={3} {4} " \
+ "-e\"set @schema=\'{4}\'; set @username=\'{2}\'; source {5};\""
# jdk commands
@@ -144,19 +183,45 @@ OS_TYPE_PROPERTY = "server.os_type"
JDK_DOWNLOAD_CMD = "curl --create-dirs -o {0} {1}"
JDK_DOWNLOAD_SIZE_CMD = "curl -I {0}"
+#JCE Policy files
+JCE_POLICY_FILENAME = "jce_policy-6.zip"
+JCE_DOWNLOAD_CMD = "curl -o {0} {1}"
+JCE_MIN_FILESIZE = 5000
+
+#Apache License Header
+ASF_LICENSE_HEADER = '''
+# Copyright 2011 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+'''
+
def configure_pg_hba_ambaridb_users():
args = optparse.Values()
- configure_postgres_username_password(args)
+ configure_database_username_password(args)
with open(PG_HBA_CONF_FILE, "a") as pgHbaConf:
pgHbaConf.write("\n")
- pgHbaConf.write("local all " + args.postgres_username +
+ pgHbaConf.write("local all " + args.database_username +
",mapred md5")
pgHbaConf.write("\n")
- pgHbaConf.write("host all " + args.postgres_username +
+ pgHbaConf.write("host all " + args.database_username +
",mapred 0.0.0.0/0 md5")
pgHbaConf.write("\n")
- pgHbaConf.write("host all " + args.postgres_username +
+ pgHbaConf.write("host all " + args.database_username +
",mapred ::/0 md5")
pgHbaConf.write("\n")
command = PG_HBA_RELOAD_CMD.format(PG_HBA_DIR)
@@ -252,39 +317,19 @@ def write_property(key, value):
def setup_db(args):
#password access to ambari-server and mapred
- configure_postgres_username_password(args)
- dbname = args.postgredbname
- file = args.init_script_file
- username = args.postgres_username
- password = args.postgres_password
+ configure_database_username_password(args)
+ dbname = args.database_name
+ scriptFile = args.init_script_file
+ username = args.database_username
+ password = args.database_password
command = SETUP_DB_CMD[:]
- command[-1] = command[-1].format(file, username, password)
+ command[-1] = command[-1].format(scriptFile, username, password)
retcode, outdata, errdata = run_os_command(command)
if not retcode == 0:
print errdata
return retcode
-def setup_remote_db(args):
- print "WARNING! To use MySQL/Oracle database place JDBC driver to "+ get_ambari_jars()
- print "Table structure in remote database should be created manually in this mode."
- (driver_name, conn_url, username, password) = get_connection_properties()
-
- write_property(PERSISTENCE_TYPE_PROPERTY, "remote")
- write_property(JDBC_DRIVER_PROPERTY, driver_name)
- write_property(JDBC_URL_PROPERTY, conn_url)
- write_property(JDBC_USER_NAME_PROPERTY, username)
- write_property(JDBC_PASSWORD_FILE_PROPERTY, store_password_file(password, JDBC_PASSWORD_FILENAME))
-
- ok = get_YN_input("Enter separate configuration for RCA database [y/n] (n)? ", False)
- if ok:
- (driver_name, conn_url, username, password) = get_connection_properties()
-
- write_property(JDBC_RCA_DRIVER_PROPERTY, driver_name)
- write_property(JDBC_RCA_URL_PROPERTY, conn_url)
- write_property(JDBC_RCA_USER_NAME_PROPERTY, username)
- write_property(JDBC_RCA_PASSWORD_FILE_PROPERTY, store_password_file(password, JDBC_RCA_PASSWORD_FILENAME))
- return 0
def store_password_file(password, filename):
conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
@@ -298,68 +343,12 @@ def store_password_file(password, filena
return passFilePath
-def get_connection_properties():
- default_db_num="1"
- default_host = "localhost"
- default_schema = "ambari"
-
- database_num = get_validated_string_input("Select database:\n1 - Postgres\n2 - Oracle\n3 - MySQL) \n["+str(default_db_num)+"]:",
- default_db_num,
- "^[123]$",
- "Invalid number.",
- False
- )
-
- db_host = get_validated_string_input("Hostname ["+default_host+"]:",
- default_host,
- "^[a-zA-Z0-9.\-]*$",
- "Invalid hostname.",
- False
- )
-
- default_port = None
- if database_num == "1":
- default_port = "5432"
- elif database_num == "2":
- default_port = "1521"
- elif database_num == "3":
- default_port = "3306"
-
- db_port = get_validated_string_input("Port ["+ str(default_port) + "]:",
- default_port,
- "^[0-9]{1,5}$",
- "Invalid port.",
- False
- )
-
- if database_num == "2":
- default_schema = "xe"
-
- db_schema = get_validated_string_input("Database/schema/service name ["+ str(default_schema) + "]:",
- default_schema,
- "^[a-zA-z\-\"]+$",
- "Invalid schema name.",
- False
- )
-
- usernameDefault = 'ambari'
- usernamePrompt = 'Username [' + usernameDefault + ']: '
- usernamePattern = "^[a-zA-Z_][a-zA-Z0-9_\-]*$"
- usernameDescr = "Invalid characters in username. Start with _ or alpha "\
- "followed by alphanumeric or _ or - characters"
-
- username = get_validated_string_input(usernamePrompt, usernameDefault,
- usernamePattern, usernameDescr, False)
- password = configure_postgres_password()
-
- return DRIVER_NAMES[int(database_num)-1], CONNECTION_STRINGS[int(database_num)-1].format(db_host, db_port, db_schema), username, password
-
def execute_db_script(args, file):
#password access to ambari-server and mapred
- configure_postgres_username_password(args)
- dbname = args.postgredbname
- username = args.postgres_username
- password = args.postgres_password
+ configure_database_username_password(args)
+ dbname = args.database_name
+ username = args.database_username
+ password = args.database_password
command = SETUP_DB_CMD[:]
command[-1] = command[-1].format(file, username, password)
retcode, outdata, errdata = run_os_command(command)
@@ -370,10 +359,10 @@ def execute_db_script(args, file):
def check_db_consistency(args, file):
#password access to ambari-server and mapred
- configure_postgres_username_password(args)
- dbname = args.postgredbname
- username = args.postgres_username
- password = args.postgres_password
+ configure_database_username_password(args)
+ dbname = args.database_name
+ username = args.database_username
+ password = args.database_password
command = SETUP_DB_CMD[:]
command[-1] = command[-1].format(file, username, password)
retcode, outdata, errdata = run_os_command(command)
@@ -391,8 +380,8 @@ def check_db_consistency(args, file):
def upgrade_stack(args, stack_id):
#password access to ambari-server and mapred
- configure_postgres_username_password(args)
- dbname = args.postgredbname
+ configure_database_username_password(args)
+ dbname = args.database_name
file = args.upgrade_stack_script_file
stack_name, stack_version = stack_id.split(STACK_NAME_VER_SEP)
command = UPGRADE_STACK_CMD[:]
@@ -688,6 +677,10 @@ def check_postgre_up():
print_info_msg ("PostgreSQL is running")
return 0
else:
+ print "Run initdb"
+ retcode, out, err = run_os_command(PG_INITDB_CMD)
+ if retcode == 0:
+ print out
print "About to start PostgreSQL"
retcode, out, err = run_os_command(PG_START_CMD)
return retcode
@@ -773,6 +766,16 @@ def find_jdk():
print "Selected JDK {0}".format(jdkPath)
return jdkPath
+#
+# Checks if options determine local DB configuration
+#
+def is_local_database(options):
+ if options.database == DATABASE_NAMES[0] \
+ and options.database_host == "localhost" \
+ and options.database_port == DATABASE_PORTS[0] \
+ and options.database_name == "ambari":
+ return True
+ return False
#
# Setup the Ambari Server.
@@ -791,7 +794,12 @@ def setup(args):
print_error_msg ('Failed to stop iptables. Exiting.')
sys.exit(retcode)
- if not REMOTE_DATABASE:
+ print 'Configuring database...'
+ prompt_db_properties(args)
+
+ if is_local_database(args):
+ print 'Default properties detected. Using built-in database.'
+ store_local_properties(args)
print 'Checking PostgreSQL...'
retcode = check_postgre_up()
@@ -812,12 +820,21 @@ def setup(args):
sys.exit(retcode)
else:
- print 'Configuring remote database connection properties'
+ retcode = store_remote_properties(args)
+ if retcode != 0:
+ print_error_msg('Unable to save config file')
+ sys.exit(retcode)
+
+ print_warning_msg('Before starting server JDBC driver for {0} should be placed to {1}.'.format(args.database, JAVA_SHARE_PATH))
+
+ print 'Configuring remote database connection properties...'
retcode = setup_remote_db(args)
if not retcode == 0:
print_error_msg ('Error while configuring connection properties. Exiting')
sys.exit(retcode)
-
+
+
+
print 'Checking JDK...'
retcode = download_jdk(args)
if not retcode == 0:
@@ -831,7 +848,12 @@ def setup(args):
'ambari.properties failed. Exiting.')
sys.exit(retcode)
- print "Ambari Server 'setup' finished successfully"
+ if args.warnings:
+ print "Ambari Server 'setup' finished with warnings:"
+ for warning in args.warnings:
+ print warning
+ else:
+ print "Ambari Server 'setup' finished successfully"
@@ -863,20 +885,39 @@ def reset(args):
print "Reseting the Server database..."
- configure_postgres_username_password(args)
- dbname = args.postgredbname
- filename = args.drop_script_file
- username = args.postgres_username
- password = args.postgres_password
- command = SETUP_DB_CMD[:]
- command[-1] = command[-1].format(filename, username, password)
- retcode, outdata, errdata = run_os_command(command)
- if not retcode == 0:
- print errdata
- return retcode
+ parse_properties(args)
- print_info_msg ("About to run database setup")
- setup_db(args)
+ # configure_database_username_password(args)
+ if args.persistence_type=="remote":
+ if get_db_cli_tool(args) != -1:
+ retcode, out, err = execute_remote_script(args, DATABASE_DROP_SCRIPTS[DATABASE_INDEX])
+ if not retcode == 0:
+ print err
+ return retcode
+
+ retcode, out, err = execute_remote_script(args, DATABASE_INIT_SCRIPTS[DATABASE_INDEX])
+ if not retcode == 0:
+ print err
+ return retcode
+
+ else:
+ print_error_msg(DATABASE_CLI_TOOLS[DATABASE_INDEX] + " not found. Unable to perform automatic reset.")
+ return -1
+
+ else:
+ dbname = args.database_name
+ filename = args.drop_script_file
+ username = args.database_username
+ password = args.database_password
+ command = SETUP_DB_CMD[:]
+ command[-1] = command[-1].format(filename, username, password)
+ retcode, outdata, errdata = run_os_command(command)
+ if not retcode == 0:
+ print errdata
+ return retcode
+
+ print_info_msg ("About to run database setup")
+ setup_db(args)
print "Ambari Server 'reset' complete"
@@ -948,33 +989,41 @@ def stop(args):
# Upgrades the Ambari Server.
#
def upgrade(args):
- print 'Checking PostgreSQL...'
- retcode = check_postgre_up()
- if not retcode == 0:
- printErrorMsg('PostgreSQL server not running. Exiting')
- sys.exit(retcode)
+ parse_properties(args)
+ if args.persistence_type == "remote":
- file = args.upgrade_script_file
- print 'Upgrading database...'
- retcode = execute_db_script(args, file)
- if not retcode == 0:
- printErrorMsg('Database upgrade script has failed. Exiting.')
- sys.exit(retcode)
-
- print 'Checking database integrity...'
- check_file = file[:-3] + "Check" + file[-4:]
- retcode = check_db_consistency(args, check_file)
- if not retcode == 0:
- print 'Found inconsistency. Trying to fix...'
- fix_file = file[:-3] + "Fix" + file[-4:]
- retcode = execute_db_script(args, fix_file)
+ pass
+ else:
+ print 'Checking PostgreSQL...'
+ retcode = check_postgre_up()
+ if not retcode == 0:
+ print_error_msg('PostgreSQL server not running. Exiting')
+ sys.exit(retcode)
+ file = args.upgrade_script_file
+ print 'Upgrading database...'
+ retcode = execute_db_script(args, file)
if not retcode == 0:
- printErrorMsg('Database cannot be fixed. Exiting.')
+ print_error_msg('Database upgrade script has failed. Exiting.')
sys.exit(retcode)
- else:
- print 'Database is consistent.'
+
+ print 'Checking database integrity...'
+ check_file = file[:-3] + "Check" + file[-4:]
+ retcode = check_db_consistency(args, check_file)
+
+ if not retcode == 0:
+ print 'Found inconsistency. Trying to fix...'
+ fix_file = file[:-3] + "Fix" + file[-4:]
+ retcode = execute_db_script(args, fix_file)
+
+ if not retcode == 0:
+ print_error_msg('Database cannot be fixed. Exiting.')
+ sys.exit(retcode)
+ else:
+ print 'Database is consistent.'
+
+
print "Ambari Server 'upgrade' finished successfully"
@@ -1073,11 +1122,10 @@ def get_validated_string_input(prompt, d
-def configure_postgres_password():
+def read_password(passwordDefault = PG_DEFAULT_PASSWORD):
# setup password
- passwordDefault = PG_DEFAULT_PASSWORD
passwordPrompt = 'Password [' + passwordDefault + ']: '
- passwordPattern = "^[a-zA-Z0-9_-]*$"
+ passwordPattern = PASSWORD_PATTERN
passwordDescr = "Invalid characters in password. Use only alphanumeric or " \
"_ or - characters"
@@ -1088,7 +1136,7 @@ def configure_postgres_password():
passwordDefault, passwordPattern, passwordDescr, True)
if password != password1:
print "Passwords do not match"
- password = configure_postgres_password()
+ password = read_password()
return password
@@ -1099,8 +1147,200 @@ def get_pass_file_path(conf_file):
JDBC_PASSWORD_FILENAME)
+def load_default_db_properties(args):
+ args.database=DATABASE_NAMES[DATABASE_INDEX]
+ args.database_host = "localhost"
+ args.database_port = DATABASE_PORTS[DATABASE_INDEX]
+ args.database_name = "ambari"
+ args.database_username = "ambari"
+ args.database_password = "bigdata"
+ pass
+
+def prompt_db_properties(args):
+ global DATABASE_INDEX
+
+ if PROMPT_DATABASE_OPTIONS:
+ load_default_db_properties(args)
+ ok = get_YN_input("Enter advanced database configuration [y/n] (n)? ", False)
+ if ok:
+
+ database_num = str(DATABASE_INDEX + 1)
+ database_num = get_validated_string_input(
+ "Select database:\n1 - Postgres\n2 - Oracle\n3 - MySQL \n[" + database_num + "]:",
+ database_num,
+ "^[123]$",
+ "Invalid number.",
+ False
+ )
+
+ DATABASE_INDEX = int(database_num) - 1
+ args.database = DATABASE_NAMES[DATABASE_INDEX]
+
+ args.database_host = get_validated_string_input(
+ "Hostname [" + args.database_host + "]:",
+ args.database_host,
+ "^[a-zA-Z0-9.\-]*$",
+ "Invalid hostname.",
+ False
+ )
+
+ args.database_port=DATABASE_PORTS[DATABASE_INDEX]
+ args.database_port = get_validated_string_input(
+ "Port [" + args.database_port + "]:",
+ args.database_port,
+ "^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$",
+ "Invalid port.",
+ False
+ )
+
+ args.database_name = get_validated_string_input(
+ DATABASE_STORAGE_NAMES[DATABASE_INDEX] + " name [" + args.database_name + "]:",
+ args.database_name,
+ "^[a-zA-z\-\"]+$",
+ "Invalid " + DATABASE_STORAGE_NAMES[DATABASE_INDEX] + " name.",
+ False
+ )
+
+ args.database_username = get_validated_string_input(
+ 'Username [' + args.database_username + ']: ',
+ args.database_username,
+ USERNAME_PATTERN,
+ "Invalid characters in username. Start with _ or alpha "
+ "followed by alphanumeric or _ or - characters",
+ False
+ )
+
+ args.database_password = read_password(args.database_password)
+
+
+ print_info_msg('Using database options: {database},{host},{port},{schema},{user},{password}'.format(
+ database=args.database,
+ host=args.database_host,
+ port=args.database_port,
+ schema=args.database_name,
+ user=args.database_username,
+ password=args.database_password
+ ))
+
+
+
+def store_remote_properties(args):
+ conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
+ properties = Properties()
+
+ try:
+ properties.load(open(conf_file))
+ except Exception, e:
+ print 'Could not read ambari config file "%s": %s' % (conf_file, e)
+ return -1
+
+ properties.process_pair(PERSISTENCE_TYPE_PROPERTY, "remote")
+
+ properties.process_pair(JDBC_DATABASE_PROPERTY, args.database)
+ properties.process_pair(JDBC_HOSTNAME_PROPERTY, args.database_host)
+ properties.process_pair(JDBC_PORT_PROPERTY, args.database_port)
+ properties.process_pair(JDBC_SCHEMA_PROPERTY, args.database_name)
+
+ properties.process_pair(JDBC_DRIVER_PROPERTY, DATABASE_DRIVER_NAMES[DATABASE_INDEX])
+ properties.process_pair(JDBC_URL_PROPERTY, DATABASE_CONNECTION_STRINGS[DATABASE_INDEX].format(args.database_host, args.database_port, args.database_name))
+ properties.process_pair(JDBC_USER_NAME_PROPERTY, args.database_username)
+ properties.process_pair(JDBC_PASSWORD_FILE_PROPERTY, store_password_file(args.database_password, JDBC_PASSWORD_FILENAME))
+
+ properties.process_pair(JDBC_RCA_DRIVER_PROPERTY, DATABASE_DRIVER_NAMES[DATABASE_INDEX])
+ properties.process_pair(JDBC_RCA_URL_PROPERTY, DATABASE_CONNECTION_STRINGS[DATABASE_INDEX].format(args.database_host, args.database_port, args.database_name))
+ properties.process_pair(JDBC_RCA_USER_NAME_PROPERTY, args.database_username)
+ properties.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, store_password_file(args.database_password, JDBC_PASSWORD_FILENAME))
+
+ try:
+ properties.store(open(conf_file, "w"))
+ except Exception, e:
+ print 'Could not write ambari config file "%s": %s' % (conf_file, e)
+ return -1
+
+ return 0
+
+def setup_remote_db(args):
+
+ retcode, out, err = execute_remote_script(args, DATABASE_INIT_SCRIPTS[DATABASE_INDEX])
+ if retcode != 0:
+ print err
+ print_error_msg('Database bootstrap failed. Please, provide correct connection properties.')
+ return retcode
+ pass
+
+ return 0
+
+def get_db_cli_tool(args):
+ for tool in DATABASE_CLI_TOOLS[DATABASE_INDEX]:
+ cmd =CHECK_COMMAND_EXIST_CMD.format(tool)
+ ret, out, err = run_in_shell(cmd)
+ if ret == 0:
+ return get_exec_path(tool)
+
+ return None
+
+def get_exec_path(cmd):
+ cmd = 'which {0}'.format(cmd)
+ ret, out, err = run_in_shell(cmd)
+ if ret == 0:
+ return out.strip()
+ else:
+ return None
+
+def run_in_shell(cmd):
+ print_info_msg('about to run command: ' + str(cmd))
+ process = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True
+ )
+ (stdoutdata, stderrdata) = process.communicate()
+ return process.returncode, stdoutdata, stderrdata
+
-def configure_postgres_username_password(args):
+def execute_remote_script(args, scriptPath):
+ tool = get_db_cli_tool(args)
+ if not tool:
+ args.warnings.append('{0} not found. Please, run DDL script manually'.format(DATABASE_CLI_TOOLS[DATABASE_INDEX]))
+ print_warning_msg('{0} not found'.format(DATABASE_CLI_TOOLS[DATABASE_INDEX]))
+ return -1
+
+ if args.database == "postgres":
+
+ os.environ["PGPASSWORD"] = args.database_password
+ retcode, out, err = run_in_shell('{0} {1}'.format(tool, POSTGRES_EXEC_ARGS.format(
+ args.database_host,
+ args.database_port,
+ args.database_name,
+ args.database_username,
+ scriptPath
+ )))
+ return retcode, out, err
+ elif args.database == "oracle":
+ retcode, out, err = run_in_shell('{0} {1}'.format(tool, ORACLE_EXEC_ARGS.format(
+ args.database_username,
+ args.database_password,
+ args.database_host,
+ args.database_port,
+ args.database_name,
+ scriptPath
+ )))
+ return retcode, out, err
+ elif args.database=="mysql":
+ retcode, out, err = run_in_shell('{0} {1}'.format(tool, MYSQL_EXEC_ARGS.format(
+ args.database_host,
+ args.database_port,
+ args.database_username,
+ args.database_password,
+ args.database_name,
+ scriptPath
+ )))
+ return retcode, out, err
+
+ return -1, "Wrong database", "Wrong database"
+
+def configure_database_username_password(args):
conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
properties = Properties()
@@ -1115,52 +1355,67 @@ def configure_postgres_username_password
if username and passFilePath:
print_info_msg("Database username + password already configured - skipping")
- args.postgres_username=username
- args.postgres_password = open(passFilePath).read()
+ args.database_username=username
+ args.database_password = open(passFilePath).read()
return 1
+ else:
+ print_error_msg("Connection properties not set in config file.")
- # setup username
- usernameDefault = 'ambari-server'
- usernamePrompt = 'Username [' + usernameDefault + ']: '
- usernamePattern = "^[a-zA-Z_][a-zA-Z0-9_\-]*$"
- usernameDescr = "Invalid characters in username. Start with _ or alpha " \
- "followed by alphanumeric or _ or - characters"
- username = usernameDefault
- # setup password
- password = PG_DEFAULT_PASSWORD
+def store_local_properties(args):
+ conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
+ properties = Properties()
- ok = get_YN_input("Enter advanced database configuration [y/n] (n)? ", False)
- if ok:
- username = get_validated_string_input(usernamePrompt, usernameDefault,
- usernamePattern, usernameDescr, False)
- print "Database username set to: " + username
- password = configure_postgres_password()
-
- passFilePath = get_pass_file_path(conf_file)
-
- print_info_msg ("Database username set to: " + username)
- print_info_msg ("Database password set to: " + password)
-
- with open(passFilePath, 'w+') as passFile:
- passFile.write(password)
- pass
- os.chmod(passFilePath, stat.S_IREAD | stat.S_IWRITE)
+ try:
+ properties.load(open(conf_file))
+ except Exception, e:
+ print 'Could not read ambari config file "%s": %s' % (conf_file, e)
+ return -1
- write_property(JDBC_USER_NAME_PROPERTY, username)
- write_property(JDBC_PASSWORD_FILE_PROPERTY,passFilePath)
- args.postgres_username=username
- args.postgres_password=password
+ properties.process_pair(PERSISTENCE_TYPE_PROPERTY, "local")
+ properties.process_pair(JDBC_USER_NAME_PROPERTY, args.database_username)
+ properties.process_pair(JDBC_PASSWORD_FILE_PROPERTY, store_password_file(args.database_password, JDBC_PASSWORD_FILENAME))
+ try:
+ properties.store(open(conf_file, "w"))
+ except Exception, e:
+ print 'Could not write ambari config file "%s": %s' % (conf_file, e)
+ return -1
+
+ return 0
+
+def parse_properties(args):
+ conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
+ properties = Properties()
+
+ try:
+ properties.load(open(conf_file))
+ except Exception, e:
+ print 'Could not read ambari config file "%s": %s' % (conf_file, e)
+ return -1
+
+ args.persistence_type = properties[PERSISTENCE_TYPE_PROPERTY]
+
+ if args.persistence_type == 'remote':
+ args.database = properties[JDBC_DATABASE_PROPERTY]
+ args.database_host = properties[JDBC_HOSTNAME_PROPERTY]
+ args.database_port = properties[JDBC_PORT_PROPERTY]
+ args.database_name = properties[JDBC_SCHEMA_PROPERTY]
+ global DATABASE_INDEX
+ DATABASE_INDEX = DATABASE_NAMES.index(args.database)
+
+ args.database_username = properties[JDBC_USER_NAME_PROPERTY]
+ args.database_password = open(properties[JDBC_PASSWORD_FILE_PROPERTY]).read()
+
+ return 0
#
# Main.
#
def main():
parser = optparse.OptionParser(usage="usage: %prog [options] action [stack_id]",)
- parser.add_option('-d', '--postgredbname', default='ambari',
- help="Database name in postgresql")
+
parser.add_option('-f', '--init-script-file',
default='/var/lib/ambari-server/'
'resources/Ambari-DDL-Postgres-CREATE.sql',
@@ -1186,9 +1441,13 @@ def main():
action="store_true", dest="silent", default=False,
help="Silently accepts default prompt values")
- parser.add_option("-b", "--remote-database",
- action="store_true", dest="remote_database", default=False,
- help="Set up remote database instead of local")
+
+ parser.add_option('--database', default=None, help ="Database to use postgres|oracle|mysql", dest="database")
+ parser.add_option('--databasehost', default=None, help="Hostname of database server", dest="database_host")
+ parser.add_option('--databaseport', default=None, help="Database port", dest="database_port")
+ parser.add_option('--databasename', default=None, help="Database/Schema/Service name", dest="database_name")
+ parser.add_option('--databaseusername', default=None, help="Database user login", dest="database_username")
+ parser.add_option('--databasepassword', default=None, help="Database user password", dest="database_password")
(options, args) = parser.parse_args()
@@ -1200,13 +1459,53 @@ def main():
global SILENT
SILENT = options.silent
- # skip local db setup
- global REMOTE_DATABASE
- REMOTE_DATABASE = options.remote_database
-
-
+ global DATABASE_INDEX
+ global PROMPT_DATABASE_OPTIONS
+ #perform checks
+
+ options.warnings = []
+
+ if options.database is None \
+ and options.database_host is None \
+ and options.database_port is None \
+ and options.database_name is None \
+ and options.database_username is None \
+ and options.database_password is None:
+
+ PROMPT_DATABASE_OPTIONS = True
+
+ elif not (options.database is not None
+ and options.database_host is not None
+ and options.database_port is not None
+ and options.database_name is not None
+ and options.database_username is not None
+ and options.database_password is not None):
+ parser.error('All database options should be set.')
+ pass
+  #correct database (normalize case before validating against DATABASE_NAMES)
+  if options.database is not None:
+    options.database = options.database.lower()
+    if options.database not in DATABASE_NAMES:
+      print "Incorrect database"
+      parser.print_help()
+      exit(-1)
+    DATABASE_INDEX = DATABASE_NAMES.index(options.database)
+
+ #correct port
+ if options.database_port is not None:
+ correct=False
+ try:
+ port = int(options.database_port)
+ if 65536 > port > 0:
+ correct = True
+ except ValueError:
+ pass
+ if not correct:
+ print "Incorrect database port " + options.database_port
+ parser.print_help()
+ exit(-1)
if len(args) == 0:
print parser.print_help()
@@ -1355,6 +1654,24 @@ class Properties(object):
if hasattr(self._props, name):
return getattr(self._props, name)
+ def store(self, out, header=""):
+ """ Write the properties list to the stream 'out' along
+ with the optional 'header' """
+ if out.mode[0] != 'w':
+      raise ValueError,'Stream should be opened in write mode!'
+ try:
+ out.write(''.join(('#', ASF_LICENSE_HEADER, '\n')))
+ out.write(''.join(('#',header,'\n')))
+ # Write timestamp
+ tstamp = time.strftime('%a %b %d %H:%M:%S %Z %Y', time.localtime())
+ out.write(''.join(('#',tstamp,'\n')))
+ # Write properties from the pristine dictionary
+ for prop, val in self._origprops.items():
+ if val is not None:
+ out.write(''.join((prop,'=',val,'\n')))
+ out.close()
+ except IOError, e:
+ raise
if __name__ == "__main__":
main()
Added: incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql?rev=1491057&view=auto
==============================================================================
--- incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql (added)
+++ incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql Sat Jun 8 20:36:54 2013
@@ -0,0 +1,185 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+-- DROP DATABASE IF EXISTS `ambari`;
+-- DROP USER `ambari`;
+
+delimiter ;
+
+-- CREATE DATABASE `ambari` /*!40100 DEFAULT CHARACTER SET utf8 */;
+--
+-- CREATE USER 'ambari' IDENTIFIED BY 'bigdata';
+
+-- USE @schema;
+
+CREATE TABLE clusters (cluster_id BIGINT NOT NULL, cluster_info VARCHAR(255) NOT NULL, cluster_name VARCHAR(100) NOT NULL UNIQUE, desired_cluster_state VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id));
+CREATE TABLE clusterconfig (version_tag VARCHAR(255) NOT NULL, type_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, config_data LONGTEXT NOT NULL, create_timestamp BIGINT NOT NULL, PRIMARY KEY (version_tag, type_name, cluster_id));
+CREATE TABLE clusterservices (service_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, service_enabled INTEGER NOT NULL, PRIMARY KEY (service_name, cluster_id));
+CREATE TABLE clusterstate (cluster_id BIGINT NOT NULL, current_cluster_state VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id));
+CREATE TABLE componentconfigmapping (config_type VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, config_tag VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, PRIMARY KEY (config_type, cluster_id, component_name, service_name));
+CREATE TABLE hostcomponentconfigmapping (config_type VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, config_tag VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, PRIMARY KEY (config_type, cluster_id, component_name, host_name, service_name));
+CREATE TABLE hcdesiredconfigmapping (config_type VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, config_tag VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, PRIMARY KEY (config_type, cluster_id, component_name, host_name, service_name));
+CREATE TABLE hostcomponentdesiredstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
+CREATE TABLE hostcomponentstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, current_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
+CREATE TABLE hosts (host_name VARCHAR(255) NOT NULL, cpu_count INTEGER NOT NULL, cpu_info VARCHAR(255) NOT NULL, discovery_status VARCHAR(2000) NOT NULL, disks_info LONGTEXT NOT NULL, host_attributes LONGTEXT, ipv4 VARCHAR(255), ipv6 VARCHAR(255), last_registration_time BIGINT NOT NULL, os_arch VARCHAR(255) NOT NULL, os_info VARCHAR(1000) NOT NULL, os_type VARCHAR(255) NOT NULL, ph_cpu_count INTEGER NOT NULL, public_host_name VARCHAR(255), rack_info VARCHAR(255) NOT NULL, total_mem BIGINT NOT NULL, PRIMARY KEY (host_name));
+CREATE TABLE hoststate (agent_version VARCHAR(255) NOT NULL, available_mem BIGINT NOT NULL, current_state VARCHAR(255) NOT NULL, health_status VARCHAR(255), host_name VARCHAR(255) NOT NULL, time_in_state BIGINT NOT NULL, PRIMARY KEY (host_name));
+CREATE TABLE servicecomponentdesiredstate (component_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (component_name, cluster_id, service_name));
+CREATE TABLE serviceconfigmapping (config_type VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, config_tag VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, PRIMARY KEY (config_type, cluster_id, service_name));
+CREATE TABLE servicedesiredstate (cluster_id BIGINT NOT NULL, desired_host_role_mapping INTEGER NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, service_name));
+CREATE TABLE roles (role_name VARCHAR(255) NOT NULL, PRIMARY KEY (role_name));
+CREATE TABLE users (user_id INTEGER NOT NULL, create_time TIMESTAMP DEFAULT NOW(), ldap_user INTEGER NOT NULL DEFAULT 0, user_name VARCHAR(255), user_password VARCHAR(255), PRIMARY KEY (user_id));
+CREATE TABLE execution_command (task_id BIGINT NOT NULL, command LONGBLOB, PRIMARY KEY (task_id));
+CREATE TABLE host_role_command (task_id BIGINT NOT NULL, attempt_count SMALLINT NOT NULL, event LONGTEXT NOT NULL, exitcode INTEGER NOT NULL, host_name VARCHAR(255) NOT NULL, last_attempt_time BIGINT NOT NULL, request_id BIGINT NOT NULL, role VARCHAR(255), role_command VARCHAR(255), stage_id BIGINT NOT NULL, start_time BIGINT NOT NULL, status VARCHAR(255), std_error LONGBLOB, std_out LONGBLOB, PRIMARY KEY (task_id));
+CREATE TABLE role_success_criteria (role VARCHAR(255) NOT NULL, request_id BIGINT NOT NULL, stage_id BIGINT NOT NULL, success_factor DOUBLE NOT NULL, PRIMARY KEY (role, request_id, stage_id));
+CREATE TABLE stage (stage_id BIGINT NOT NULL, request_id BIGINT NOT NULL, cluster_id BIGINT, log_info VARCHAR(255) NOT NULL, request_context VARCHAR(255), PRIMARY KEY (stage_id, request_id));
+CREATE TABLE key_value_store (`key` VARCHAR(255) NOT NULL, `value` LONGTEXT, PRIMARY KEY (`key`));
+CREATE TABLE clusterconfigmapping (type_name VARCHAR(255) NOT NULL, create_timestamp BIGINT NOT NULL, cluster_id BIGINT NOT NULL, selected INTEGER NOT NULL, version_tag VARCHAR(255) NOT NULL, user_name VARCHAR(255) NOT NULL DEFAULT '_db', PRIMARY KEY (type_name, create_timestamp, cluster_id));
+CREATE TABLE hostconfigmapping (create_timestamp BIGINT NOT NULL, host_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, type_name VARCHAR(255) NOT NULL, selected INTEGER NOT NULL, service_name VARCHAR(255), version_tag VARCHAR(255) NOT NULL, user_name VARCHAR(255) NOT NULL DEFAULT '_db', PRIMARY KEY (create_timestamp, host_name, cluster_id, type_name));
+CREATE TABLE metainfo (`metainfo_key` VARCHAR(255) NOT NULL, `metainfo_value` LONGTEXT, PRIMARY KEY (`metainfo_key`));
+CREATE TABLE ClusterHostMapping (cluster_id BIGINT NOT NULL, host_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, host_name));
+CREATE TABLE user_roles (role_name VARCHAR(255) NOT NULL, user_id INTEGER NOT NULL, PRIMARY KEY (role_name, user_id));
+CREATE TABLE ambari_sequences (sequence_name VARCHAR(50) NOT NULL, value DECIMAL(38), PRIMARY KEY (sequence_name));
+
+
+ALTER TABLE users ADD CONSTRAINT UNQ_users_0 UNIQUE (user_name, ldap_user);
+ALTER TABLE clusterconfig ADD CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterservices ADD CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterstate ADD CONSTRAINT FK_clusterstate_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE componentconfigmapping ADD CONSTRAINT FK_componentconfigmapping_config_tag FOREIGN KEY (config_tag, config_type, cluster_id) REFERENCES clusterconfig (version_tag, type_name, cluster_id);
+ALTER TABLE componentconfigmapping ADD CONSTRAINT FK_componentconfigmapping_component_name FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentconfigmapping ADD CONSTRAINT FK_hostcomponentconfigmapping_config_tag FOREIGN KEY (config_tag, config_type, cluster_id) REFERENCES clusterconfig (version_tag, type_name, cluster_id);
+ALTER TABLE hostcomponentconfigmapping ADD CONSTRAINT FK_hostcomponentconfigmapping_cluster_id FOREIGN KEY (cluster_id, component_name, host_name, service_name) REFERENCES hostcomponentstate (cluster_id, component_name, host_name, service_name);
+ALTER TABLE hcdesiredconfigmapping ADD CONSTRAINT FK_hcdesiredconfigmapping_config_tag FOREIGN KEY (config_tag, config_type, cluster_id) REFERENCES clusterconfig (version_tag, type_name, cluster_id);
+ALTER TABLE hcdesiredconfigmapping ADD CONSTRAINT FK_hcdesiredconfigmapping_cluster_id FOREIGN KEY (cluster_id, component_name, host_name, service_name) REFERENCES hostcomponentdesiredstate (cluster_id, component_name, host_name, service_name);
+ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT FK_hostcomponentdesiredstate_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT FK_hostcomponentdesiredstate_component_name FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentstate ADD CONSTRAINT FK_hostcomponentstate_component_name FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentstate ADD CONSTRAINT FK_hostcomponentstate_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE hoststate ADD CONSTRAINT FK_hoststate_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE servicecomponentdesiredstate ADD CONSTRAINT FK_servicecomponentdesiredstate_service_name FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE serviceconfigmapping ADD CONSTRAINT FK_serviceconfigmapping_config_tag FOREIGN KEY (config_tag, config_type, cluster_id) REFERENCES clusterconfig (version_tag, type_name, cluster_id);
+ALTER TABLE serviceconfigmapping ADD CONSTRAINT FK_serviceconfigmapping_service_name FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE servicedesiredstate ADD CONSTRAINT FK_servicedesiredstate_service_name FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE execution_command ADD CONSTRAINT FK_execution_command_task_id FOREIGN KEY (task_id) REFERENCES host_role_command (task_id);
+ALTER TABLE host_role_command ADD CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id);
+ALTER TABLE host_role_command ADD CONSTRAINT FK_host_role_command_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE role_success_criteria ADD CONSTRAINT FK_role_success_criteria_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id);
+ALTER TABLE stage ADD CONSTRAINT FK_stage_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterconfigmapping ADD CONSTRAINT FK_clusterconfigmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE ClusterHostMapping ADD CONSTRAINT FK_ClusterHostMapping_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE ClusterHostMapping ADD CONSTRAINT FK_ClusterHostMapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE user_roles ADD CONSTRAINT FK_user_roles_user_id FOREIGN KEY (user_id) REFERENCES users (user_id);
+ALTER TABLE user_roles ADD CONSTRAINT FK_user_roles_role_name FOREIGN KEY (role_name) REFERENCES roles (role_name);
+
+
+INSERT INTO ambari_sequences(sequence_name, value) values ('cluster_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, value) values ('host_role_command_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, value) values ('user_id_seq', 1);
+
+insert into roles(role_name)
+  select 'admin'
+  union all
+  select 'user';
+
+insert into users(user_id, user_name, user_password)
+  select 1,'admin','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00';
+
+insert into user_roles(role_name, user_id)
+  select 'admin',1;
+
+insert into metainfo(`metainfo_key`, `metainfo_value`)
+  select 'version','1.3.0';
+
+
+
+CREATE TABLE workflow (
+ workflowId VARCHAR(255), workflowName TEXT,
+ parentWorkflowId VARCHAR(255),
+ workflowContext TEXT, userName TEXT,
+ startTime BIGINT, lastUpdateTime BIGINT,
+ numJobsTotal INTEGER, numJobsCompleted INTEGER,
+ inputBytes BIGINT, outputBytes BIGINT,
+ duration BIGINT,
+ PRIMARY KEY (workflowId),
+ FOREIGN KEY (parentWorkflowId) REFERENCES workflow(workflowId)
+);
+
+CREATE TABLE job (
+ jobId VARCHAR(255), workflowId VARCHAR(255), jobName TEXT, workflowEntityName TEXT,
+ userName TEXT, queue TEXT, acls TEXT, confPath TEXT,
+ submitTime BIGINT, launchTime BIGINT, finishTime BIGINT,
+ maps INTEGER, reduces INTEGER, status TEXT, priority TEXT,
+ finishedMaps INTEGER, finishedReduces INTEGER,
+ failedMaps INTEGER, failedReduces INTEGER,
+ mapsRuntime BIGINT, reducesRuntime BIGINT,
+ mapCounters TEXT, reduceCounters TEXT, jobCounters TEXT,
+ inputBytes BIGINT, outputBytes BIGINT,
+ PRIMARY KEY(jobId),
+ FOREIGN KEY(workflowId) REFERENCES workflow(workflowId)
+);
+
+CREATE TABLE task (
+ taskId VARCHAR(255), jobId VARCHAR(255), taskType TEXT, splits TEXT,
+ startTime BIGINT, finishTime BIGINT, status TEXT, error TEXT, counters TEXT,
+ failedAttempt TEXT,
+ PRIMARY KEY(taskId),
+ FOREIGN KEY(jobId) REFERENCES job(jobId)
+);
+
+CREATE TABLE taskAttempt (
+ taskAttemptId VARCHAR(255), taskId VARCHAR(255), jobId VARCHAR(255), taskType TEXT, taskTracker TEXT,
+ startTime BIGINT, finishTime BIGINT,
+ mapFinishTime BIGINT, shuffleFinishTime BIGINT, sortFinishTime BIGINT,
+ locality TEXT, avataar TEXT,
+ status TEXT, error TEXT, counters TEXT,
+ inputBytes BIGINT, outputBytes BIGINT,
+ PRIMARY KEY(taskAttemptId),
+ FOREIGN KEY(jobId) REFERENCES job(jobId),
+ FOREIGN KEY(taskId) REFERENCES task(taskId)
+);
+
+CREATE TABLE hdfsEvent (
+ timestamp BIGINT,
+ userName TEXT,
+ clientIP TEXT,
+ operation TEXT,
+ srcPath TEXT,
+ dstPath TEXT,
+ permissions TEXT
+);
+
+CREATE TABLE mapreduceEvent (
+ timestamp BIGINT,
+ userName TEXT,
+ clientIP TEXT,
+ operation TEXT,
+ target TEXT,
+ result TEXT,
+ description TEXT,
+ permissions TEXT
+);
+
+CREATE TABLE clusterEvent (
+ timestamp BIGINT,
+ service TEXT, status TEXT,
+ error TEXT, data TEXT ,
+ host TEXT, rack TEXT
+);
+
+
+
+
Added: incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-MySQL-DROP.sql
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-MySQL-DROP.sql?rev=1491057&view=auto
==============================================================================
--- incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-MySQL-DROP.sql (added)
+++ incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-MySQL-DROP.sql Sat Jun 8 20:36:54 2013
@@ -0,0 +1,29 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+SET FOREIGN_KEY_CHECKS = 0;
+SET @tables = NULL;
+SELECT GROUP_CONCAT(table_schema, '.', table_name) INTO @tables
+ FROM information_schema.tables
+ WHERE table_schema = @schema; -- specify DB name here.
+
+SET @tables = CONCAT('DROP TABLE ', @tables);
+PREPARE stmt FROM @tables;
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+SET FOREIGN_KEY_CHECKS = 1;
Added: incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql?rev=1491057&view=auto
==============================================================================
--- incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql (added)
+++ incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql Sat Jun 8 20:36:54 2013
@@ -0,0 +1,175 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+CREATE TABLE clusters (cluster_id NUMBER(19) NOT NULL, cluster_info VARCHAR2(255) NULL, cluster_name VARCHAR2(100) NOT NULL UNIQUE, desired_cluster_state VARCHAR2(255) NULL, desired_stack_version VARCHAR2(255) NULL, PRIMARY KEY (cluster_id));
+CREATE TABLE clusterconfig (version_tag VARCHAR2(255) NOT NULL, type_name VARCHAR2(255) NOT NULL, cluster_id NUMBER(19) NOT NULL, config_data CLOB NOT NULL, create_timestamp NUMBER(19) NOT NULL, PRIMARY KEY (version_tag, type_name, cluster_id));
+CREATE TABLE clusterservices (service_name VARCHAR2(255) NOT NULL, cluster_id NUMBER(19) NOT NULL, service_enabled NUMBER(10) NOT NULL, PRIMARY KEY (service_name, cluster_id));
+CREATE TABLE clusterstate (cluster_id NUMBER(19) NOT NULL, current_cluster_state VARCHAR2(255) NULL, current_stack_version VARCHAR2(255) NULL, PRIMARY KEY (cluster_id));
+CREATE TABLE componentconfigmapping (config_type VARCHAR2(255) NOT NULL, cluster_id NUMBER(19) NOT NULL, component_name VARCHAR2(255) NOT NULL, config_tag VARCHAR2(255) NOT NULL, service_name VARCHAR2(255) NOT NULL, timestamp NUMBER(19) NOT NULL, PRIMARY KEY (config_type, cluster_id, component_name, service_name));
+CREATE TABLE hostcomponentconfigmapping (config_type VARCHAR2(255) NOT NULL, cluster_id NUMBER(19) NOT NULL, component_name VARCHAR2(255) NOT NULL, config_tag VARCHAR2(255) NOT NULL, host_name VARCHAR2(255) NOT NULL, service_name VARCHAR2(255) NOT NULL, timestamp NUMBER(19) NOT NULL, PRIMARY KEY (config_type, cluster_id, component_name, host_name, service_name));
+CREATE TABLE hcdesiredconfigmapping (config_type VARCHAR2(255) NOT NULL, cluster_id NUMBER(19) NOT NULL, component_name VARCHAR2(255) NOT NULL, config_tag VARCHAR2(255) NOT NULL, host_name VARCHAR2(255) NOT NULL, service_name VARCHAR2(255) NOT NULL, timestamp NUMBER(19) NOT NULL, PRIMARY KEY (config_type, cluster_id, component_name, host_name, service_name));
+CREATE TABLE hostcomponentdesiredstate (cluster_id NUMBER(19) NOT NULL, component_name VARCHAR2(255) NOT NULL, desired_stack_version VARCHAR2(255) NULL, desired_state VARCHAR2(255) NOT NULL, host_name VARCHAR2(255) NOT NULL, service_name VARCHAR2(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
+CREATE TABLE hostcomponentstate (cluster_id NUMBER(19) NOT NULL, component_name VARCHAR2(255) NOT NULL, current_stack_version VARCHAR2(255) NOT NULL, current_state VARCHAR2(255) NOT NULL, host_name VARCHAR2(255) NOT NULL, service_name VARCHAR2(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
+CREATE TABLE hosts (host_name VARCHAR2(255) NOT NULL, cpu_count INTEGER NOT NULL, cpu_info VARCHAR2(255) NULL, discovery_status VARCHAR2(2000) NULL, disks_info CLOB NOT NULL, host_attributes CLOB NULL, ipv4 VARCHAR2(255) NULL, ipv6 VARCHAR2(255) NULL, last_registration_time INTEGER NOT NULL, os_arch VARCHAR2(255) NULL, os_info VARCHAR2(1000) NULL, os_type VARCHAR2(255) NULL, ph_cpu_count INTEGER NOT NULL, public_host_name VARCHAR2(255) NULL, rack_info VARCHAR2(255) NOT NULL, total_mem INTEGER NOT NULL, PRIMARY KEY (host_name));
+CREATE TABLE hoststate (agent_version VARCHAR2(255) NULL, available_mem NUMBER(19) NOT NULL, current_state VARCHAR2(255) NOT NULL, health_status VARCHAR2(255) NULL, host_name VARCHAR2(255) NOT NULL, time_in_state NUMBER(19) NOT NULL, PRIMARY KEY (host_name));
+CREATE TABLE servicecomponentdesiredstate (component_name VARCHAR2(255) NOT NULL, cluster_id NUMBER(19) NOT NULL, desired_stack_version VARCHAR2(255) NULL, desired_state VARCHAR2(255) NOT NULL, service_name VARCHAR2(255) NOT NULL, PRIMARY KEY (component_name, cluster_id, service_name));
+CREATE TABLE serviceconfigmapping (config_type VARCHAR2(255) NOT NULL, cluster_id NUMBER(19) NOT NULL, config_tag VARCHAR2(255) NOT NULL, service_name VARCHAR2(255) NOT NULL, timestamp NUMBER(19) NOT NULL, PRIMARY KEY (config_type, cluster_id, service_name));
+CREATE TABLE servicedesiredstate (cluster_id NUMBER(19) NOT NULL, desired_host_role_mapping NUMBER(10) NOT NULL, desired_stack_version VARCHAR2(255) NULL, desired_state VARCHAR2(255) NOT NULL, service_name VARCHAR2(255) NOT NULL, PRIMARY KEY (cluster_id, service_name));
+CREATE TABLE roles (role_name VARCHAR2(255) NOT NULL, PRIMARY KEY (role_name));
+CREATE TABLE users (user_id NUMBER(10) NOT NULL, create_time TIMESTAMP NULL, ldap_user NUMBER(10) DEFAULT 0, user_name VARCHAR2(255) NULL, user_password VARCHAR2(255) NULL, PRIMARY KEY (user_id));
+CREATE TABLE execution_command (task_id NUMBER(19) NOT NULL, command BLOB NULL, PRIMARY KEY (task_id));
+CREATE TABLE host_role_command (task_id NUMBER(19) NOT NULL, attempt_count NUMBER(5) NOT NULL, event CLOB NULL, exitcode NUMBER(10) NOT NULL, host_name VARCHAR2(255) NOT NULL, last_attempt_time NUMBER(19) NOT NULL, request_id NUMBER(19) NOT NULL, role VARCHAR2(255) NULL, role_command VARCHAR2(255) NULL, stage_id NUMBER(19) NOT NULL, start_time NUMBER(19) NOT NULL, status VARCHAR2(255) NULL, std_error BLOB NULL, std_out BLOB NULL, PRIMARY KEY (task_id));
+CREATE TABLE role_success_criteria (role VARCHAR2(255) NOT NULL, request_id NUMBER(19) NOT NULL, stage_id NUMBER(19) NOT NULL, success_factor NUMBER(19,4) NOT NULL, PRIMARY KEY (role, request_id, stage_id));
+CREATE TABLE stage (stage_id NUMBER(19) NOT NULL, request_id NUMBER(19) NOT NULL, cluster_id NUMBER(19) NULL, log_info VARCHAR2(255) NULL, request_context VARCHAR2(255) NULL, PRIMARY KEY (stage_id, request_id));
+CREATE TABLE key_value_store ("key" VARCHAR2(255) NOT NULL, "value" CLOB NULL, PRIMARY KEY ("key"));
+CREATE TABLE clusterconfigmapping (type_name VARCHAR2(255) NOT NULL, create_timestamp NUMBER(19) NOT NULL, cluster_id NUMBER(19) NOT NULL, selected NUMBER(10) NOT NULL, version_tag VARCHAR2(255) NOT NULL, user_name VARCHAR2(255) DEFAULT '_db', PRIMARY KEY (type_name, create_timestamp, cluster_id)); -- user_name: VARCHAR -> VARCHAR2 for consistency with the rest of the schema; Oracle reserves plain VARCHAR for possible future redefinition
+CREATE TABLE hostconfigmapping (create_timestamp NUMBER(19) NOT NULL, host_name VARCHAR2(255) NOT NULL, cluster_id NUMBER(19) NOT NULL, type_name VARCHAR2(255) NOT NULL, selected NUMBER(10) NOT NULL, service_name VARCHAR2(255) NULL, version_tag VARCHAR2(255) NOT NULL, user_name VARCHAR2(255) DEFAULT '_db', PRIMARY KEY (create_timestamp, host_name, cluster_id, type_name)); -- user_name: VARCHAR -> VARCHAR2, same rationale as above
+CREATE TABLE metainfo ("metainfo_key" VARCHAR2(255) NOT NULL, "metainfo_value" CLOB NULL, PRIMARY KEY ("metainfo_key"));
+CREATE TABLE ClusterHostMapping (cluster_id NUMBER(19) NOT NULL, host_name VARCHAR2(255) NOT NULL, PRIMARY KEY (cluster_id, host_name));
+CREATE TABLE user_roles (role_name VARCHAR2(255) NOT NULL, user_id NUMBER(10) NOT NULL, PRIMARY KEY (role_name, user_id));
+CREATE TABLE ambari_sequences (sequence_name VARCHAR2(50) NOT NULL, value NUMBER(38) NULL, PRIMARY KEY (sequence_name));
+
+
+ALTER TABLE users ADD CONSTRAINT UNQ_users_0 UNIQUE (user_name, ldap_user);
+ALTER TABLE clusterconfig ADD CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterservices ADD CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterstate ADD CONSTRAINT FK_clusterstate_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE componentconfigmapping ADD CONSTRAINT cmponentconfigmappingconfigtag FOREIGN KEY (config_tag, config_type, cluster_id) REFERENCES clusterconfig (version_tag, type_name, cluster_id);
+ALTER TABLE componentconfigmapping ADD CONSTRAINT cmpnntconfigmappingcmpnentname FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentconfigmapping ADD CONSTRAINT hstcmponentconfigmappingclstrd FOREIGN KEY (cluster_id, component_name, host_name, service_name) REFERENCES hostcomponentstate (cluster_id, component_name, host_name, service_name);
+ALTER TABLE hostcomponentconfigmapping ADD CONSTRAINT hstcmponentconfigmappingcnfgtg FOREIGN KEY (config_tag, config_type, cluster_id) REFERENCES clusterconfig (version_tag, type_name, cluster_id);
+ALTER TABLE hcdesiredconfigmapping ADD CONSTRAINT hcdesiredconfigmappingcnfigtag FOREIGN KEY (config_tag, config_type, cluster_id) REFERENCES clusterconfig (version_tag, type_name, cluster_id);
+ALTER TABLE hcdesiredconfigmapping ADD CONSTRAINT hcdesiredconfigmappingclsterid FOREIGN KEY (cluster_id, component_name, host_name, service_name) REFERENCES hostcomponentdesiredstate (cluster_id, component_name, host_name, service_name);
+ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT hstcmponentdesiredstatehstname FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT hstcmpnntdesiredstatecmpnntnme FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentstate ADD CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES servicecomponentdesiredstate (component_name, cluster_id, service_name);
+ALTER TABLE hostcomponentstate ADD CONSTRAINT hostcomponentstate_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE hoststate ADD CONSTRAINT FK_hoststate_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE servicecomponentdesiredstate ADD CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE serviceconfigmapping ADD CONSTRAINT srviceconfigmappingservicename FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE serviceconfigmapping ADD CONSTRAINT serviceconfigmappingconfig_tag FOREIGN KEY (config_tag, config_type, cluster_id) REFERENCES clusterconfig (version_tag, type_name, cluster_id);
+ALTER TABLE servicedesiredstate ADD CONSTRAINT servicedesiredstateservicename FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id);
+ALTER TABLE execution_command ADD CONSTRAINT FK_execution_command_task_id FOREIGN KEY (task_id) REFERENCES host_role_command (task_id);
+ALTER TABLE host_role_command ADD CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id);
+ALTER TABLE host_role_command ADD CONSTRAINT FK_host_role_command_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE role_success_criteria ADD CONSTRAINT role_success_criteria_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id);
+ALTER TABLE stage ADD CONSTRAINT FK_stage_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE clusterconfigmapping ADD CONSTRAINT clusterconfigmappingcluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE ClusterHostMapping ADD CONSTRAINT ClusterHostMapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE ClusterHostMapping ADD CONSTRAINT ClusterHostMapping_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
+ALTER TABLE user_roles ADD CONSTRAINT FK_user_roles_user_id FOREIGN KEY (user_id) REFERENCES users (user_id);
+ALTER TABLE user_roles ADD CONSTRAINT FK_user_roles_role_name FOREIGN KEY (role_name) REFERENCES roles (role_name);
+
+
+
+INSERT INTO ambari_sequences(sequence_name, value) values ('host_role_command_id_seq', 0); -- id counters kept in a table rather than native sequences
+INSERT INTO ambari_sequences(sequence_name, value) values ('user_id_seq', 1); -- starts at 1: user_id 1 is consumed by the bootstrap admin below
+INSERT INTO ambari_sequences(sequence_name, value) values ('cluster_id_seq', 0);
+INSERT INTO metainfo("metainfo_key", "metainfo_value") values ('version', '1.3.0'); -- schema version marker
+
+insert into Roles(role_name) -- built-in authorization roles
+select 'admin' from dual
+union all
+select 'user' from dual;
+
+insert into Users(user_id, user_name, user_password) -- bootstrap admin account
+select 1,'admin','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00' from dual; -- presumably a salted hash of the default password; NOTE(review): confirm hash scheme against the server's authenticator
+
+insert into user_roles(role_name, user_id) -- grant admin role to the bootstrap user
+select 'admin',1 from dual;
+
+
+
+commit;
+
+-- ambari rca
+
+CREATE TABLE workflow (
+ workflowId VARCHAR2(4000), workflowName VARCHAR2(4000),
+ parentWorkflowId VARCHAR2(4000),
+ workflowContext VARCHAR2(4000), userName VARCHAR2(4000),
+ startTime INTEGER, lastUpdateTime INTEGER,
+ numJobsTotal INTEGER, numJobsCompleted INTEGER,
+ inputBytes INTEGER, outputBytes INTEGER,
+ duration INTEGER,
+ PRIMARY KEY (workflowId),
+ FOREIGN KEY (parentWorkflowId) REFERENCES workflow(workflowId)
+);
+
+CREATE TABLE job (
+ jobId VARCHAR2(4000), workflowId VARCHAR2(4000), jobName VARCHAR2(4000), workflowEntityName VARCHAR2(4000),
+ userName VARCHAR2(4000), queue CLOB, acls CLOB, confPath CLOB,
+ submitTime INTEGER, launchTime INTEGER, finishTime INTEGER,
+ maps INTEGER, reduces INTEGER, status VARCHAR2(4000), priority VARCHAR2(4000),
+ finishedMaps INTEGER, finishedReduces INTEGER,
+ failedMaps INTEGER, failedReduces INTEGER,
+ mapsRuntime INTEGER, reducesRuntime INTEGER,
+ mapCounters VARCHAR2(4000), reduceCounters VARCHAR2(4000), jobCounters VARCHAR2(4000),
+ inputBytes INTEGER, outputBytes INTEGER,
+ PRIMARY KEY(jobId),
+ FOREIGN KEY(workflowId) REFERENCES workflow(workflowId)
+);
+
+CREATE TABLE task (
+ taskId VARCHAR2(4000), jobId VARCHAR2(4000), taskType VARCHAR2(4000), splits VARCHAR2(4000),
+ startTime INTEGER, finishTime INTEGER, status VARCHAR2(4000), error CLOB, counters VARCHAR2(4000),
+ failedAttempt VARCHAR2(4000),
+ PRIMARY KEY(taskId),
+ FOREIGN KEY(jobId) REFERENCES job(jobId)
+);
+
+CREATE TABLE taskAttempt (
+ taskAttemptId VARCHAR2(4000), taskId VARCHAR2(4000), jobId VARCHAR2(4000), taskType VARCHAR2(4000), taskTracker VARCHAR2(4000),
+ startTime INTEGER, finishTime INTEGER,
+ mapFinishTime INTEGER, shuffleFinishTime INTEGER, sortFinishTime INTEGER,
+ locality VARCHAR2(4000), avataar VARCHAR2(4000),
+ status VARCHAR2(4000), error CLOB, counters VARCHAR2(4000),
+ inputBytes INTEGER, outputBytes INTEGER,
+ PRIMARY KEY(taskAttemptId),
+ FOREIGN KEY(jobId) REFERENCES job(jobId),
+ FOREIGN KEY(taskId) REFERENCES task(taskId)
+);
+
+CREATE TABLE hdfsEvent (
+ timestamp INTEGER,
+ userName VARCHAR2(4000),
+ clientIP VARCHAR2(4000),
+ operation VARCHAR2(4000),
+ srcPath CLOB,
+ dstPath CLOB,
+ permissions VARCHAR2(4000)
+);
+
+CREATE TABLE mapreduceEvent (
+ timestamp INTEGER,
+ userName VARCHAR2(4000),
+ clientIP VARCHAR2(4000),
+ operation VARCHAR2(4000),
+ target VARCHAR2(4000),
+ result CLOB,
+ description CLOB,
+ permissions VARCHAR2(4000)
+);
+
+CREATE TABLE clusterEvent (
+ timestamp INTEGER,
+ service VARCHAR2(4000), status VARCHAR2(4000),
+ error CLOB, data CLOB ,
+ host VARCHAR2(4000), rack VARCHAR2(4000)
+);
+
Added: incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-Oracle-DROP.sql
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-Oracle-DROP.sql?rev=1491057&view=auto
==============================================================================
--- incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-Oracle-DROP.sql (added)
+++ incubator/ambari/branches/branch-1.2.4/ambari-server/src/main/resources/Ambari-DDL-Oracle-DROP.sql Sat Jun 8 20:36:54 2013
@@ -0,0 +1,58 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+BEGIN -- drops EVERY object owned by the connected schema; run only as the dedicated Ambari DB user
+ FOR cur_rec IN (SELECT object_name, object_type
+ FROM user_objects
+ WHERE object_type IN
+ ('TABLE',
+ 'VIEW',
+ 'PACKAGE',
+ 'PROCEDURE',
+ 'FUNCTION',
+ 'SEQUENCE'
+ ))
+ LOOP
+ BEGIN
+ IF cur_rec.object_type = 'TABLE'
+ THEN
+ EXECUTE IMMEDIATE 'DROP '
+ || cur_rec.object_type
+ || ' "'
+ || cur_rec.object_name
+ || '" CASCADE CONSTRAINTS PURGE'; -- PURGE bypasses the recycle bin, so re-runs do not trip over BIN$ leftovers
+ ELSE
+ EXECUTE IMMEDIATE 'DROP '
+ || cur_rec.object_type
+ || ' "'
+ || cur_rec.object_name
+ || '"';
+ END IF;
+ EXCEPTION -- swallow per-object failures so one bad object does not stop the sweep
+ WHEN OTHERS
+ THEN
+ DBMS_OUTPUT.put_line ( 'FAILED: DROP ' -- visible only with SET SERVEROUTPUT ON
+ || cur_rec.object_type
+ || ' "'
+ || cur_rec.object_name
+ || '"'
+ );
+ END;
+ END LOOP;
+END;
+/