You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@dlab.apache.org by om...@apache.org on 2019/07/18 13:21:48 UTC
[incubator-dlab] branch DLAB-terraform updated: add endpoint deploy
This is an automated email from the ASF dual-hosted git repository.
omartushevskyi pushed a commit to branch DLAB-terraform
in repository https://gitbox.apache.org/repos/asf/incubator-dlab.git
The following commit(s) were added to refs/heads/DLAB-terraform by this push:
new b329387 add endpoint deploy
new 0dddc53 Merge pull request #171 from bohdana-kuzmenko/DLAB-terraform-cli
b329387 is described below
commit b329387f926251de9a47a88abd3ac3be2040c7d4
Author: bohdana_kuzmenko <bo...@gmail.com>
AuthorDate: Thu Jul 18 16:17:44 2019 +0300
add endpoint deploy
---
.../terraform/bin/deploy/__init__.py | 0
.../terraform/bin/deploy/daemon.json | 5 +
.../terraform/bin/deploy/endpoint_fab.py | 465 +++++++++++++++++++++
.../terraform/bin/deploy/provisioning.yml | 140 +++++++
.../terraform/bin/terraform-cli.py | 76 ++--
5 files changed, 653 insertions(+), 33 deletions(-)
diff --git a/infrastructure-provisioning/terraform/bin/deploy/__init__.py b/infrastructure-provisioning/terraform/bin/deploy/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/infrastructure-provisioning/terraform/bin/deploy/daemon.json b/infrastructure-provisioning/terraform/bin/deploy/daemon.json
new file mode 100644
index 0000000..c2932be
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/deploy/daemon.json
@@ -0,0 +1,5 @@
+{
+ "insecure-registries": ["REPOSITORY"],
+ "disable-legacy-registry": true,
+ "dns": ["DNS_IP_RESOLVE"]
+}
diff --git a/infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py b/infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py
new file mode 100644
index 0000000..f31ae98
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py
@@ -0,0 +1,465 @@
from fabric import *
from patchwork.files import exists
import argparse
import logging
import random
import secrets
import string
import sys
import traceback
+
# Shared module state, populated during start_deploy():
conn = None  # fabric Connection to the endpoint host (see init_dlab_connection)
args = None  # parsed CLI namespace (see init_args)
keystore_passwd = None  # generated keystore password (see generate_passwd)
java_home = None  # remote JRE path (see set_java_home)
+
+
def create_user():
    """Create ``args.os_user`` on the remote host with passwordless sudo.

    Connects as the stock 'ubuntu' user, creates the new user, copies the
    initial user's authorized_keys across and fixes ownership/permissions.
    Idempotent via the ``~/.ssh_user_ensured`` flag file.

    Exits the process with status 1 on any failure.
    """
    initial_user = 'ubuntu'
    sudo_group = 'sudo'
    with Connection(host=args.hostname, user=initial_user,
                    connect_kwargs={'key_filename': args.keyfile}) as conn:
        try:
            if not exists(conn,
                          '/home/{}/.ssh_user_ensured'.format(initial_user)):
                conn.sudo('useradd -m -G {1} -s /bin/bash {0}'
                          .format(args.os_user, sudo_group))
                # Grant passwordless sudo to the new user.
                conn.sudo(
                    'bash -c \'echo "{} ALL = NOPASSWD:ALL" >> /etc/sudoers\''
                    .format(args.os_user))
                conn.sudo('mkdir /home/{}/.ssh'.format(args.os_user))
                conn.sudo('chown -R {0}:{0} /home/{1}/.ssh/'
                          .format(initial_user, args.os_user))
                conn.sudo('cat /home/{0}/.ssh/authorized_keys > '
                          '/home/{1}/.ssh/authorized_keys'
                          .format(initial_user, args.os_user))
                # Hand the .ssh directory back to the new user with the
                # permissions sshd requires (700 dir, 600 authorized_keys).
                conn.sudo(
                    'chown -R {0}:{0} /home/{0}/.ssh/'.format(args.os_user))
                conn.sudo('chmod 700 /home/{0}/.ssh'.format(args.os_user))
                conn.sudo('chmod 600 /home/{0}/.ssh/authorized_keys'
                          .format(args.os_user))
                conn.sudo(
                    'touch /home/{}/.ssh_user_ensured'.format(initial_user))
        except Exception as err:
            # Lazy %s formatting so the error text actually reaches the log
            # (the original passed the error as an unused positional arg).
            logging.error('Failed to create new os_user: %s', err)
            sys.exit(1)
+
+
def copy_keys():
    """Upload the SSH private key file into the remote user's ~/keys dir.

    Exits the process with status 1 on failure.
    """
    try:
        # Fixed: the destination previously started with a stray space,
        # which made fabric upload to a path literally beginning with ' '.
        conn.put(args.keyfile, '/home/{}/keys'.format(args.os_user))
    except Exception as err:
        logging.error('Failed to copy keys: %s', err)
        traceback.print_exc()
        sys.exit(1)
+
+
def ensure_crutch_endpoint():
    """Create the ``~/.ensure_dir`` directory used by all idempotency flags.

    Exits the process with status 1 on failure.
    """
    try:
        if not exists(conn, '/home/{}/.ensure_dir'.format(args.os_user)):
            conn.sudo('mkdir /home/{}/.ensure_dir'.format(args.os_user))
    except Exception as err:
        # %s-style lazy formatting so the error detail is logged.
        logging.error('Failed to create ~/.ensure_dir/: %s', err)
        traceback.print_exc()
        sys.exit(1)
+
+
def ensure_logs_endpoint():
    """Create the DLab install dir, provisioning log and supervisor log.

    Log files are made world-writable (666) so services running under other
    accounts can append to them.
    """
    log_root_dir = "/var/opt/dlab/log"
    supervisor_log_file = "/var/log/application/provision-service.log"

    if not exists(conn, args.dlab_path):
        conn.sudo("mkdir -p {}".format(args.dlab_path))
        conn.sudo("chown -R {} {}".format(args.os_user, args.dlab_path))

    if not exists(conn, log_root_dir):
        provisioning_log = '{}/provisioning/provisioning.log'.format(
            log_root_dir)
        conn.sudo('mkdir -p {}/provisioning'.format(log_root_dir))
        conn.sudo('touch {}'.format(provisioning_log))
        conn.sudo('chmod 666 {}'.format(provisioning_log))

    if not exists(conn, supervisor_log_file):
        conn.sudo("mkdir -p /var/log/application")
        conn.sudo("touch {}".format(supervisor_log_file))
        conn.sudo("chmod 666 {}".format(supervisor_log_file))
+
+
def ensure_jre_jdk_endpoint():
    """Install OpenJDK 8 JRE and JDK (headless) via apt.

    Idempotent via the ``~/.ensure_dir/jre_jdk_ensured`` flag file.
    Exits the process with status 1 on failure.
    """
    try:
        if not exists(conn, '/home/{}/.ensure_dir/jre_jdk_ensured'
                .format(args.os_user)):
            conn.sudo('apt-get install -y openjdk-8-jre-headless')
            conn.sudo('apt-get install -y openjdk-8-jdk-headless')
            conn.sudo('touch /home/{}/.ensure_dir/jre_jdk_ensured'
                      .format(args.os_user))
    except Exception as err:
        # %s-style lazy formatting so the error detail is logged.
        logging.error('Failed to install Java JDK: %s', err)
        traceback.print_exc()
        sys.exit(1)
+
+
def ensure_supervisor_endpoint():
    """Install supervisor and register it to start on boot.

    Idempotent via the ``~/.ensure_dir/superv_ensured`` flag file.
    Exits the process with status 1 on failure.
    """
    try:
        if not exists(conn, '/home/{}/.ensure_dir/superv_ensured'
                .format(args.os_user)):
            conn.sudo('apt-get -y install supervisor')
            conn.sudo('update-rc.d supervisor defaults')
            conn.sudo('update-rc.d supervisor enable')
            conn.sudo('touch /home/{}/.ensure_dir/superv_ensured'
                      .format(args.os_user))
    except Exception as err:
        # %s-style lazy formatting so the error detail is logged.
        logging.error('Failed to install Supervisor: %s', err)
        traceback.print_exc()
        sys.exit(1)
+
+
def ensure_docker_endpoint():
    """Install docker-ce and configure /etc/docker/daemon.json.

    Adds the official Docker apt repository, installs the pinned
    ``args.docker_version``, substitutes the REPOSITORY and DNS_IP_RESOLVE
    placeholders in the bundled daemon.json template and restarts docker.
    Idempotent via the ``~/.ensure_dir/docker_ensured`` flag file.
    Exits the process with status 1 on failure.
    """
    try:
        if not exists(conn, '/home/{}/.ensure_dir/docker_ensured'
                .format(args.os_user)):
            conn.sudo("bash -c "
                      "'curl -fsSL https://download.docker.com/linux/ubuntu/gpg"
                      " | apt-key add -'")
            conn.sudo('add-apt-repository "deb [arch=amd64] '
                      'https://download.docker.com/linux/ubuntu '
                      '$(lsb_release -cs) stable"')
            conn.sudo('apt-get update')
            conn.sudo('apt-cache policy docker-ce')
            conn.sudo('apt-get install -y docker-ce={}'
                      .format(args.docker_version))
            # Pick up the DNS server systemd-resolved is actually using so
            # docker containers resolve through the same server.
            dns_ip_resolve = (conn.run("systemd-resolve --status "
                                       "| grep -A 5 'Current Scopes: DNS' "
                                       "| grep 'DNS Servers:' "
                                       "| awk '{print $3}'")
                              .stdout.rstrip("\n\r"))
            if not exists(conn, '{}/tmp'.format(args.dlab_path)):
                conn.run('mkdir -p {}/tmp'.format(args.dlab_path))
            conn.put('./daemon.json',
                     '{}/tmp/daemon.json'.format(args.dlab_path))
            conn.sudo('sed -i "s|REPOSITORY|{}:{}|g" {}/tmp/daemon.json'
                      .format(args.repository_address,
                              args.repository_port,
                              args.dlab_path))
            conn.sudo('sed -i "s|DNS_IP_RESOLVE|{}|g" {}/tmp/daemon.json'
                      .format(dns_ip_resolve, args.dlab_path))
            conn.sudo('mv {}/tmp/daemon.json /etc/docker'
                      .format(args.dlab_path))
            conn.sudo('usermod -a -G docker ' + args.os_user)
            conn.sudo('update-rc.d docker defaults')
            conn.sudo('update-rc.d docker enable')
            conn.sudo('service docker restart')
            conn.sudo('touch /home/{}/.ensure_dir/docker_ensured'
                      .format(args.os_user))
    except Exception as err:
        # %s-style lazy formatting so the error detail is logged.
        logging.error('Failed to install Docker: %s', err)
        traceback.print_exc()
        sys.exit(1)
+
+
def create_key_dir_endpoint():
    """Create the ``~/keys`` directory for keystore and certificate files.

    Exits the process with status 1 on failure.
    """
    try:
        if not exists(conn, '/home/{}/keys'.format(args.os_user)):
            conn.run('mkdir /home/{}/keys'.format(args.os_user))
    except Exception as err:
        # %s-style lazy formatting so the error detail is logged.
        logging.error('Failed create keys directory as ~/keys: %s', err)
        traceback.print_exc()
        sys.exit(1)
+
+
def generate_passwd(size=10,
                    chars=string.digits + string.ascii_letters):
    """Generate a random keystore password into ``keystore_passwd``.

    Uses the cryptographically secure ``secrets`` module rather than
    ``random``, which is not suitable for security-sensitive values.

    Args:
        size: password length (default 10).
        chars: alphabet to draw from (default alphanumerics).

    Returns:
        The generated password (also stored in the module global).
    """
    global keystore_passwd
    keystore_passwd = ''.join(secrets.choice(chars) for _ in range(size))
    return keystore_passwd
+
+
def configure_keystore_endpoint(os_user):
    """Create the DLab keystore/certificate and import it into the JRE.

    Generates a keypair into ``~/keys/dlab.keystore.jks``, exports the
    certificate and imports it into the JRE cacerts truststore.

    Args:
        os_user: remote user owning the key files (callers pass args.os_user).

    NOTE(review): the exists() guards use ``args.os_user`` while the keytool
    commands use the ``os_user`` parameter; both are the same value in
    practice — confirm before diverging.

    Exits the process with status 1 on failure.
    """
    try:
        if not exists(conn,
                      '/home/{}/keys/dlab.keystore.jks'.format(args.os_user)):
            conn.sudo('keytool -genkeypair -alias dlab -keyalg RSA '
                      '-validity 730 -storepass {1} -keypass {1} '
                      '-keystore /home/{0}/keys/dlab.keystore.jks '
                      '-keysize 2048 -dname "CN={2}"'
                      .format(os_user, keystore_passwd, args.hostname))
        if not exists(conn, '/home/{}/keys/dlab.crt'.format(args.os_user)):
            conn.sudo('keytool -exportcert -alias dlab -storepass {1} '
                      '-file /home/{0}/keys/dlab.crt '
                      '-keystore /home/{0}/keys/dlab.keystore.jks'
                      .format(os_user, keystore_passwd))
        if not exists(conn,
                      '/home/{}/.ensure_dir/cert_imported'
                      .format(args.os_user)):
            # 'changeit' is the default JRE cacerts password.
            conn.sudo('keytool -importcert -trustcacerts -alias dlab '
                      '-file /home/{0}/keys/dlab.crt -noprompt '
                      '-storepass changeit -keystore {1}/lib/security/cacerts'
                      .format(os_user, java_home))
            conn.sudo('touch /home/{}/.ensure_dir/cert_imported'
                      .format(args.os_user))
    except Exception as err:
        # %s-style lazy formatting so the error detail is logged.
        logging.error('Failed to configure Keystore certificates: %s', err)
        traceback.print_exc()
        sys.exit(1)
+
+
def configure_supervisor_endpoint():
    """Install the supervisor service config and the provisioning.yml.

    Uploads supervisor_svc.conf and provisioning.yml templates and
    substitutes their placeholders (OS_USR, WEB_CONF, KEYNAME,
    KEYSTORE_PASSWORD, JRE_HOME, CLOUD_PROVIDER, SSN_HOST, MONGO_PASSWORD)
    with live values via sed. Idempotent via the
    ``~/.ensure_dir/configure_supervisor_ensured`` flag file.
    Exits the process with status 1 on failure.
    """
    try:
        if not exists(conn,
                      '/home/{}/.ensure_dir/configure_supervisor_ensured'
                      .format(args.os_user)):
            supervisor_conf = '/etc/supervisor/conf.d/supervisor_svc.conf'
            if not exists(conn, '{}/tmp'.format(args.dlab_path)):
                conn.run('mkdir -p {}/tmp'.format(args.dlab_path))
            conn.put('./supervisor_svc.conf',
                     '{}/tmp/supervisor_svc.conf'.format(args.dlab_path))
            dlab_conf_dir = '{}/conf/'.format(args.dlab_path)
            if not exists(conn, dlab_conf_dir):
                conn.run('mkdir -p {}'.format(dlab_conf_dir))
            web_path = '{}/webapp'.format(args.dlab_path)
            if not exists(conn, web_path):
                conn.run('mkdir -p {}'.format(web_path))
            conn.sudo('sed -i "s|OS_USR|{}|g" {}/tmp/supervisor_svc.conf'
                      .format(args.os_user, args.dlab_path))
            conn.sudo('sed -i "s|WEB_CONF|{}|g" {}/tmp/supervisor_svc.conf'
                      .format(dlab_conf_dir, args.dlab_path))
            conn.sudo('sed -i \'s=WEB_APP_DIR={}=\' {}/tmp/supervisor_svc.conf'
                      .format(web_path, args.dlab_path))
            conn.sudo('cp {}/tmp/supervisor_svc.conf {}'
                      .format(args.dlab_path, supervisor_conf))
            conn.put('./provisioning.yml', '{}provisioning.yml'
                     .format(dlab_conf_dir))
            conn.sudo('sed -i "s|KEYNAME|{}|g" {}provisioning.yml'
                      .format(args.conf_key_name, dlab_conf_dir))
            conn.sudo('sed -i "s|KEYSTORE_PASSWORD|{}|g" {}provisioning.yml'
                      .format(keystore_passwd, dlab_conf_dir))
            conn.sudo('sed -i "s|JRE_HOME|{}|g" {}provisioning.yml'
                      .format(java_home, dlab_conf_dir))
            conn.sudo('sed -i "s|CLOUD_PROVIDER|{}|g" {}provisioning.yml'
                      .format(args.cloud_provider, dlab_conf_dir))
            conn.sudo('sed -i "s|SSN_HOST|{}|g" {}provisioning.yml'
                      .format(args.ssn_host, dlab_conf_dir))
            conn.sudo('sed -i "s|MONGO_PASSWORD|{}|g" {}provisioning.yml'
                      .format(args.mongo_password, dlab_conf_dir))
            conn.sudo('touch /home/{}/.ensure_dir/configure_supervisor_ensured'
                      .format(args.os_user))
    except Exception as err:
        # %s-style lazy formatting so the error detail is logged.
        logging.error('Failed to configure Supervisor: %s', err)
        traceback.print_exc()
        sys.exit(1)
+
+
def ensure_jar_endpoint():
    """Download the provisioning-service jar into ``{dlab_path}/webapp``.

    Fetches the jar from the private repository and renames it to the
    version-independent ``provisioning-service.jar``. Idempotent via the
    ``~/.ensure_dir/backend_jar_ensured`` flag file.
    Exits the process with status 1 on failure.
    """
    try:
        ensure_file = ('/home/{}/.ensure_dir/backend_jar_ensured'
                       .format(args.os_user))
        if not exists(conn, ensure_file):
            web_path = '{}/webapp'.format(args.dlab_path)
            if not exists(conn, web_path):
                conn.run('mkdir -p {}'.format(web_path))

            # NOTE(review): the jar version (2.1) is hard-coded in the URL;
            # consider making it a CLI argument.
            conn.run('wget -P {} --user={} --password={} '
                     'https://{}/repository/packages/provisioning-service-'
                     '2.1.jar --no-check-certificate'
                     .format(web_path, args.repository_user,
                             args.repository_pass, args.repository_address))
            conn.run('mv {0}/*.jar {0}/provisioning-service.jar'
                     .format(web_path))
            conn.sudo('touch {}'.format(ensure_file))
    except Exception as err:
        # %s-style lazy formatting so the error detail is logged.
        logging.error('Failed to download jar-provisioner: %s', err)
        traceback.print_exc()
        sys.exit(1)
+
+
def start_supervisor_endpoint():
    """Restart supervisor so it picks up the newly installed config.

    Exits the process with status 1 on failure.
    """
    try:
        conn.sudo("service supervisor restart")
    except Exception as err:
        # %s-style lazy formatting so the error detail is logged.
        logging.error('Unable to start Supervisor: %s', err)
        traceback.print_exc()
        sys.exit(1)
+
+
def pull_docker_images():
    """Pull all DLab images from the private registry and retag them locally.

    For every image: pull the registry-prefixed tag, retag it to its short
    local name, then remove the prefixed tag. The command sequence (all
    pulls, then all tags, then all rmis) matches the original hand-unrolled
    version but is expressed as loops to remove the copy-paste duplication.
    Idempotent via the ``~/.ensure_dir/docker_images_pulled`` flag file.
    Exits the process with status 1 on failure.
    """
    try:
        ensure_file = ('/home/{}/.ensure_dir/docker_images_pulled'
                       .format(args.os_user))
        if not exists(conn, ensure_file):
            registry = '{}:{}'.format(args.repository_address,
                                      args.repository_port)
            images = ('docker.dlab-base',
                      'docker.dlab-edge',
                      'docker.dlab-jupyter',
                      'docker.dlab-rstudio',
                      'docker.dlab-zeppelin',
                      'docker.dlab-tensor',
                      'docker.dlab-tensor-rstudio',
                      'docker.dlab-deeplearning',
                      'docker.dlab-dataengine-service',
                      'docker.dlab-dataengine')
            conn.sudo('docker login -u {} -p {} {}'
                      .format(args.repository_user,
                              args.repository_pass,
                              registry))
            for image in images:
                conn.sudo('docker pull {}/{}'.format(registry, image))
            for image in images:
                conn.sudo('docker tag {}/{} {}'.format(registry, image, image))
            for image in images:
                conn.sudo('docker rmi {}/{}'.format(registry, image))
            conn.sudo('chown -R {0}:docker /home/{0}/.docker/'
                      .format(args.os_user))
            conn.sudo('touch {}'.format(ensure_file))
    except Exception as err:
        # %s-style lazy formatting so the error detail is logged.
        logging.error('Failed to pull Docker images: %s', err)
        traceback.print_exc()
        sys.exit(1)
+
+
def init_args():
    """Parse known CLI arguments into the module-level ``args`` namespace.

    Fixes a crash in the original: ``parse_known_args()`` returns a
    ``(namespace, extras)`` tuple, so storing the tuple made every later
    ``args.hostname``-style access raise ``AttributeError``. Only the
    namespace is kept; unrecognized arguments are ignored so the script can
    be invoked with extra terraform-cli options.

    Returns:
        The parsed ``argparse.Namespace`` (also stored in the global).
    """
    global args
    parser = argparse.ArgumentParser()
    parser.add_argument('--dlab_path', type=str, default='')
    parser.add_argument('--key_name', type=str, default='')
    parser.add_argument('--conf_key_name', type=str, default='')
    parser.add_argument('--keyfile', type=str, default='')
    parser.add_argument('--hostname', type=str, default='')
    parser.add_argument('--os_user', type=str, default='dlab-user')
    parser.add_argument('--cloud_provider', type=str, default='')
    parser.add_argument('--ssn_host', type=str, default='')
    parser.add_argument('--mongo_password', type=str, default='')
    parser.add_argument('--repository_address', type=str, default='')
    parser.add_argument('--repository_port', type=str, default='')
    parser.add_argument('--repository_user', type=str, default='')
    parser.add_argument('--repository_pass', type=str, default='')
    parser.add_argument('--docker_version', type=str,
                        default='18.06.3~ce~3-0~ubuntu')
    args = parser.parse_known_args()[0]
    return args
+
+
def update_system():
    """Refresh the apt package index on the endpoint host."""
    conn.sudo('apt-get update')
+
+
def init_dlab_connection(ip=None, user=None,
                         pkey=None):
    """Open the shared fabric connection as the dlab user.

    Args:
        ip: host to connect to; defaults to ``args.hostname``.
        user: remote user; defaults to ``args.os_user``.
        pkey: private key path; defaults to ``args.keyfile``.

    Stores the connection in the module-level ``conn``.
    Exits the process with status 1 on failure.
    """
    global conn
    if not ip:
        ip = args.hostname
    if not user:
        user = args.os_user
    if not pkey:
        pkey = args.keyfile
    try:
        conn = Connection(ip, user, connect_kwargs={'key_filename': pkey})
    except Exception as err:
        # %s-style lazy formatting so the error detail is logged.
        logging.error('Failed connect as dlab-user: %s', err)
        traceback.print_exc()
        sys.exit(1)
+
+
def set_java_home():
    """Detect the remote JRE path and cache it in the ``java_home`` global."""
    global java_home
    # Ask update-alternatives where the active java lives and keep only
    # the .../jre prefix of that path.
    query_cmd = ('bash -c "update-alternatives --query java | grep \'Value: \' '
                 '| grep -o \'/.*/jre\'" ')
    java_home = conn.sudo(query_cmd).stdout.rstrip("\n\r")
+
+
def close_connection():
    """Close the shared fabric connection opened by init_dlab_connection."""
    # No ``global`` statement needed: we only call a method on the object,
    # we never rebind the module-level name.
    conn.close()
+
+
def start_deploy():
    """Run the full endpoint provisioning sequence end to end.

    Order matters: the user must exist before connecting as it, Java must
    be installed before the JRE path is detected, and the keystore password
    must be generated before supervisor/keystore configuration uses it.
    """
    init_args()

    logging.info("Creating dlab-user")
    create_user()

    init_dlab_connection()
    update_system()
    generate_passwd()

    # (log message, step callable) pairs, executed strictly in order.
    steps = (
        ("Configuring Crutch", ensure_crutch_endpoint),
        ("Configuring Logs", ensure_logs_endpoint),
        ("Installing Java", ensure_jre_jdk_endpoint),
        (None, set_java_home),
        ("Installing Supervisor", ensure_supervisor_endpoint),
        ("Installing Docker", ensure_docker_endpoint),
        ("Configuring Supervisor", configure_supervisor_endpoint),
        ("Creating key directory", create_key_dir_endpoint),
        ("Starting Endpoint", lambda: configure_keystore_endpoint(args.os_user)),
        ("Ensure jar", ensure_jar_endpoint),
        ("Starting supervisor", start_supervisor_endpoint),
        ("Pulling docker images", pull_docker_images),
    )
    for message, step in steps:
        if message is not None:
            logging.info(message)
        step()

    close_connection()


if __name__ == "__main__":
    start_deploy()
diff --git a/infrastructure-provisioning/terraform/bin/deploy/provisioning.yml b/infrastructure-provisioning/terraform/bin/deploy/provisioning.yml
new file mode 100644
index 0000000..bf9c32c
--- /dev/null
+++ b/infrastructure-provisioning/terraform/bin/deploy/provisioning.yml
@@ -0,0 +1,140 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+<#assign LOG_ROOT_DIR="/var/opt/dlab/log">
+<#assign KEYS_DIR="/home/${sys['user.name']}/keys">
+<#assign KEY_STORE_PATH="${KEYS_DIR}/dlab.keystore.jks">
+<#assign KEY_STORE_PASSWORD="KEYSTORE_PASSWORD">
+<#assign TRUST_STORE_PATH="JRE_HOME/lib/security/cacerts">
+<#assign TRUST_STORE_PASSWORD="changeit">
+
+# Available options are aws, azure, gcp
+<#assign CLOUD_TYPE="CLOUD_PROVIDER">
+cloudProvider: ${CLOUD_TYPE}
+
+#Switch on/off developer mode here
+<#assign DEV_MODE="false">
+devMode: ${DEV_MODE}
+
+
+mongo:
+ host: SSN_HOST
+ port: 27017
+ username: admin
+ password: MONGO_PASSWORD
+ database: dlabdb
+
+selfService:
+ protocol: https
+ host: SSN_HOST
+ port: 8443
+ jerseyClient:
+ timeout: 3s
+ connectionTimeout: 3s
+
+securityService:
+ protocol: https
+ host: SSN_HOST
+ port: 8090
+ jerseyClient:
+ timeout: 20s
+ connectionTimeout: 20s
+
+
+provisioningService:
+ protocol: https
+ host: localhost
+ port: 8084
+ jerseyClient:
+ timeout: 3s
+ connectionTimeout: 3s
+
+# Log out user on inactivity
+inactiveUserTimeoutMillSec: 7200000
+
+backupScriptPath: /opt/dlab/tmp/backup.py
+backupDirectory: /opt/dlab/tmp/result
+keyDirectory: ${KEYS_DIR}
+responseDirectory: /opt/dlab/tmp
+handlerDirectory: /opt/dlab/handlers
+dockerLogDirectory: ${LOG_ROOT_DIR}
+warmupPollTimeout: 25s
+resourceStatusPollTimeout: 300m
+keyLoaderPollTimeout: 30m
+requestEnvStatusTimeout: 50s
+adminKey: KEYNAME
+edgeImage: docker.dlab-edge
+fileLengthCheckDelay: 500ms
+
+<#if CLOUD_TYPE == "aws">
+emrEC2RoleDefault: EMR_EC2_DefaultRole
+emrServiceRoleDefault: EMR_DefaultRole
+</#if>
+
+processMaxThreadsPerJvm: 50
+processMaxThreadsPerUser: 5
+processTimeout: 180m
+
+handlersPersistenceEnabled: true
+
+server:
+ requestLog:
+ appenders:
+ - type: file
+ currentLogFilename: ${LOG_ROOT_DIR}/provisioning/request-provisioning.log
+ archive: true
+ archivedLogFilenamePattern: ${LOG_ROOT_DIR}/provisioning/request-provisioning-%d{yyyy-MM-dd}.log.gz
+ archivedFileCount: 10
+ applicationConnectors:
+# - type: http
+ - type: https
+ port: 8084
+ certAlias: dlab
+ validateCerts: true
+ keyStorePath: ${KEY_STORE_PATH}
+ keyStorePassword: ${KEY_STORE_PASSWORD}
+ trustStorePath: ${TRUST_STORE_PATH}
+ trustStorePassword: ${TRUST_STORE_PASSWORD}
+ adminConnectors:
+# - type: http
+ - type: https
+ port: 8085
+ certAlias: dlab
+ validateCerts: true
+ keyStorePath: ${KEY_STORE_PATH}
+ keyStorePassword: ${KEY_STORE_PASSWORD}
+ trustStorePath: ${TRUST_STORE_PATH}
+ trustStorePassword: ${TRUST_STORE_PASSWORD}
+
+logging:
+ level: INFO
+ loggers:
+ com.epam: TRACE
+ com.aegisql: INFO
+ appenders:
+<#if DEV_MODE == "true">
+ - type: console
+</#if>
+ - type: file
+ currentLogFilename: ${LOG_ROOT_DIR}/provisioning/provisioning.log
+ archive: true
+ archivedLogFilenamePattern: ${LOG_ROOT_DIR}/provisioning/provisioning-%d{yyyy-MM-dd}.log.gz
+ archivedFileCount: 10
diff --git a/infrastructure-provisioning/terraform/bin/terraform-cli.py b/infrastructure-provisioning/terraform/bin/terraform-cli.py
index 8b3cc93..31662be 100755
--- a/infrastructure-provisioning/terraform/bin/terraform-cli.py
+++ b/infrastructure-provisioning/terraform/bin/terraform-cli.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+import itertools
import json
import os
import abc
@@ -8,7 +9,10 @@ import time
from fabric import Connection
from patchwork.transfers import rsync
import logging
-
+import os.path
+import sys
+from deploy.endpoint_fab import start_deploy
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
logging.basicConfig(level=logging.INFO,
format='%(levelname)s-%(message)s')
@@ -224,7 +228,7 @@ class AbstractDeployBuilder:
class DeployDirector:
- def build(self, builder):
+ def build(self, *builders):
""" Do build action
Args:
@@ -233,8 +237,9 @@ class DeployDirector:
None
"""
try:
- builder.provision()
- builder.deploy()
+ for builder in builders:
+ builder.provision()
+ builder.deploy()
except Exception as ex:
print(ex)
@@ -446,6 +451,8 @@ class AWSK8sSourceBuilder(AbstractDeployBuilder):
conn.run('terraform validate')
conn.run('terraform apply -auto-approve '
'-var \'ssn_k8s_alb_dns_name={}\''.format(dns_name))
+ output = json.loads(conn.run('terraform output -json').stdout)
+ self.fill_args_from_dict(output)
def output_terraform_result(self):
dns_name = json.loads(TerraformProvider().output(' -json ssn_k8s_alb_dns_name'))
@@ -466,6 +473,10 @@ class AWSK8sSourceBuilder(AbstractDeployBuilder):
""".format(dns_name, ssn_bucket_name, ssn_vpc_id,
', '.join(ssn_subnets), ssn_k8s_sg_id, dns_name))
+ def fill_args_from_dict(self, output):
+ for key, value in output.items():
+ sys.argv.extend([key, value.get('value')])
+
def deploy(self):
if self.args.get('service_args').get('action') == 'destroy':
return
@@ -475,6 +486,7 @@ class AWSK8sSourceBuilder(AbstractDeployBuilder):
self.check_tiller_status()
self.copy_terraform_to_remote()
self.run_remote_terraform()
+ self.fill_args_from_dict(json.loads(TerraformProvider().output()))
self.output_terraform_result()
@@ -511,49 +523,47 @@ class AWSEndpointBuilder(AbstractDeployBuilder):
default='t2.medium')
.add_int('--endpoint_volume_size', 'Size of root volume in GB.',
default=30)
- .add_str('--request_id', 'Request id', is_terraform_param=False)
- .add_str('--dlab_path', '', is_terraform_param=False)
- .add_str('--resource', '', is_terraform_param=False)
- .add_str('--conf_key_name', '', is_terraform_param=False)
- .add_str('--pkey', '', is_terraform_param=False, required=True)
- .add_str('--hostname', '', is_terraform_param=False)
- .add_str('--jar_url', '', is_terraform_param=False)
- .add_str('--os_user', '', is_terraform_param=False)
- .add_str('--cloud_provider', '', is_terraform_param=False)
- .add_str('--ssn_host', '', is_terraform_param=False)
- .add_str('--mongo_password', '', is_terraform_param=False)
- .add_str('--repository_address', '', is_terraform_param=False)
- .add_str('--repository_user', '', is_terraform_param=False)
- .add_str('--repository_pass', '', is_terraform_param=False)
- .add_str('--docker_version', '', is_terraform_param=False,
- default='18.06.3~ce~3-0~ubuntu')
)
return params.build()
def deploy(self):
- pass
+ start_deploy()
def main():
- parser = argparse.ArgumentParser()
- parser.add_argument('--source', help='Target', choices=['aws'],
- required=True)
- parser.add_argument('--target', help='Source', choices=['k8s', 'endpoint'],
- required=True)
- arguments = vars(parser.parse_known_args()[0])
+ sources_targets = {'aws': ['k8s', 'endpoint']}
+
+ no_args_error = ('usage: ./terraform-cli {} {}'
+ .format(set(sources_targets.keys()),
+ set(itertools.chain(*sources_targets.values()))))
+
+ no_target_error = lambda x: ('usage: ./terraform-cli {} {}'
+ .format(x,
+ set(itertools.chain(*sources_targets.values()))))
- source = arguments.get('source').lower()
- target = arguments.get('target').lower()
+ if any([len(sys.argv) == 1,
+ len(sys.argv) > 2 and sys.argv[1] not in sources_targets]):
+ print(no_args_error)
+ sys.exit(1)
+
+ if any([len(sys.argv) == 2,
+ sys.argv[1] not in sources_targets,
+ len(sys.argv) > 2 and sys.argv[2] not in sources_targets[sys.argv[1]]
+ ]):
+ print(no_target_error(sys.argv[1]))
+ exit(1)
+
+ source = sys.argv[1]
+ target = sys.argv[2]
if source == 'aws':
if target == 'k8s':
- builder = AWSK8sSourceBuilder()
+ builders = AWSK8sSourceBuilder(),
elif target == 'endpoint':
- builder = AWSEndpointBuilder()
+ builders = (AWSK8sSourceBuilder(), AWSEndpointBuilder())
deploy_director = DeployDirector()
- deploy_director.build(builder)
-
+ deploy_director.build(*builders)
if __name__ == "__main__":
main()
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@dlab.apache.org
For additional commands, e-mail: commits-help@dlab.apache.org