You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@dlab.apache.org by om...@apache.org on 2019/07/03 13:12:41 UTC

[incubator-dlab] branch DLAB-terraform updated: added Nexus deployment;

This is an automated email from the ASF dual-hosted git repository.

omartushevskyi pushed a commit to branch DLAB-terraform
in repository https://gitbox.apache.org/repos/asf/incubator-dlab.git


The following commit(s) were added to refs/heads/DLAB-terraform by this push:
     new ad585cb  added Nexus deployment;
ad585cb is described below

commit ad585cb7aff160f353b7a666f31a6a351314fb77
Author: Oleh Martushevskyi <Ol...@epam.com>
AuthorDate: Wed Jul 3 16:12:34 2019 +0300

    added Nexus deployment;
---
 .../scripts/deploy_repository/deploy_repository.py | 1773 ++++++++++++++++++++
 .../scripts/deploy_repository/files/Dockerfile     |   29 +
 .../files/mount-efs-sequentially.service           |   29 +
 .../deploy_repository/files/nexus.properties       |   20 +
 .../scripts/addUpdateScript.groovy                 |   91 +
 .../scripts/update_amazon_repositories.py          |   53 +
 .../templates/addCustomRepository.groovy           |  209 +++
 .../templates/configureNexus.groovy                |   81 +
 .../deploy_repository/templates/jetty-https.xml    |   98 ++
 .../scripts/deploy_repository/templates/nexus.conf |   76 +
 .../deploy_repository/templates/nexus.service      |   32 +
 .../scripts/deploy_repository/templates/squid.conf |   55 +
 .../templates/updateRepositories.groovy            |   24 +
 13 files changed, 2570 insertions(+)

diff --git a/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py b/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py
new file mode 100644
index 0000000..09d6bfb
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py
@@ -0,0 +1,1773 @@
+#!/usr/bin/python
+# *****************************************************************************
+#
+# Copyright (c) 2019, EPAM SYSTEMS INC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ******************************************************************************
+
+from fabric.api import *
+from fabric.contrib.files import exists
+import argparse
+import boto3
+import traceback
+import sys
+import json
+import time
+import string
+import random
+from ConfigParser import SafeConfigParser
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--service_base_name', required=True, type=str, default='',
+                    help='unique name for repository environment')
+parser.add_argument('--aws_access_key', type=str, default='', help='AWS Access Key ID')
+parser.add_argument('--aws_secret_access_key', type=str, default='', help='AWS Secret Access Key')
+parser.add_argument('--vpc_id', type=str, default='', help='AWS VPC ID')
+parser.add_argument('--vpc_cidr', type=str, default='172.31.0.0/16', help='Cidr of VPC')
+parser.add_argument('--subnet_id', type=str, default='', help='AWS Subnet ID')
+parser.add_argument('--subnet_cidr', type=str, default='172.31.0.0/24', help='Cidr of subnet')
+parser.add_argument('--sg_id', type=str, default='', help='AWS VPC ID')
+parser.add_argument('--billing_tag', type=str, default='product:dlab', help='Tag in format: "Key1:Value1"')
+parser.add_argument('--additional_tags', type=str, default='', help='Tags in format: "Key1:Value1;Key2:Value2"')
+parser.add_argument('--tag_resource_id', type=str, default='dlab', help='The name of user tag')
+parser.add_argument('--allowed_ip_cidr', type=str, default='', help='Comma-separated CIDR of IPs which will have '
+                                                                    'access to the instance')
+parser.add_argument('--key_name', type=str, default='', help='Key name (WITHOUT ".pem")')
+parser.add_argument('--key_path', type=str, default='', help='Key path')
+parser.add_argument('--instance_type', type=str, default='t2.medium', help='Instance shape')
+parser.add_argument('--region', required=True, type=str, default='', help='AWS region name')
+parser.add_argument('--elastic_ip', type=str, default='', help='Elastic IP address')
+parser.add_argument('--network_type', type=str, default='public', help='Network type: public or private')
+parser.add_argument('--hosted_zone_name', type=str, default='', help='Name of hosted zone')
+parser.add_argument('--hosted_zone_id', type=str, default='', help='ID of hosted zone')
+parser.add_argument('--subdomain', type=str, default='', help='Subdomain name')
+parser.add_argument('--efs_enabled', type=str, default='False', help="True - use AWS EFS, False - don't use AWS EFS")
+parser.add_argument('--efs_id', type=str, default='', help="ID of AWS EFS")
+parser.add_argument('--primary_disk_size', type=str, default='30', help="Disk size of primary volume")
+parser.add_argument('--additional_disk_size', type=str, default='50', help="Disk size of additional volume")
+parser.add_argument('--dlab_conf_file_path', type=str, default='', help="Full path to DLab conf file")
+parser.add_argument('--nexus_admin_password', type=str, default='', help="Password for Nexus admin user")
+parser.add_argument('--nexus_service_user_name', type=str, default='dlab-nexus', help="Nexus service user name")
+parser.add_argument('--nexus_service_user_password', type=str, default='', help="Nexus service user password")
+parser.add_argument('--action', required=True, type=str, default='', help='Action: create or terminate')
+args = parser.parse_args()
+
+
def id_generator(size=10, with_digits=True):
    """Return a random identifier of *size* characters.

    Letters are always included; digits are added only when
    *with_digits* is True.
    """
    alphabet = string.ascii_letters
    if with_digits:
        alphabet += string.digits
    return ''.join(random.choice(alphabet) for _ in range(size))
+
+
def vpc_exist(return_id=False):
    """Check whether the service VPC exists.

    :returns: the VPC id when *return_id* is True, otherwise a boolean
    :raises Exception: when the EC2 lookup fails
    """
    try:
        tag_filters = [{'Name': 'tag-key', 'Values': [tag_name]},
                       {'Name': 'tag-value', 'Values': [args.service_base_name]}]
        found = False
        for vpc in ec2_resource.vpcs.filter(Filters=tag_filters):
            if return_id:
                return vpc.id
            found = True
        return found
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with getting AWS VPC: {}'.format(str(err)))
        raise Exception
+
+
def create_vpc(vpc_cidr):
    """Create the service VPC with base and Name tags.

    :param vpc_cidr: CIDR block for the new VPC
    :returns: the new VPC id
    """
    try:
        base_tag = {"Key": tag_name, "Value": args.service_base_name}
        name_tag = {"Key": "Name", "Value": args.service_base_name + '-vpc'}
        new_vpc = ec2_resource.create_vpc(CidrBlock=vpc_cidr)
        for vpc_tag in (base_tag, name_tag):
            create_tag(new_vpc.id, vpc_tag)
        return new_vpc.id
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with creating AWS VPC: {}'.format(str(err)))
        raise Exception
+
+
def enable_vpc_dns(vpc_id):
    """Enable DNS hostnames on the given VPC."""
    try:
        ec2_client.modify_vpc_attribute(
            VpcId=vpc_id, EnableDnsHostnames={'Value': True})
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with modifying AWS VPC attributes: {}'.format(str(err)))
        raise Exception
+
+
def create_rt(vpc_id):
    """Create a route table and internet gateway for *vpc_id*.

    Tags both resources, attaches the gateway to the VPC, and adds a
    default (0.0.0.0/0) route through the gateway.

    :returns: the new route table id
    """
    try:
        base_tag = {"Key": tag_name, "Value": args.service_base_name}
        name_tag = {"Key": "Name", "Value": args.service_base_name + '-rt'}
        rt_response = ec2_client.create_route_table(VpcId=vpc_id)
        rt_id = rt_response.get('RouteTable').get('RouteTableId')
        print('Created AWS Route-Table with ID: {}'.format(rt_id))
        create_tag([rt_id], json.dumps(base_tag))
        create_tag([rt_id], json.dumps(name_tag))
        ig_response = ec2_client.create_internet_gateway()
        ig_id = ig_response.get('InternetGateway').get('InternetGatewayId')
        # NOTE(review): the gateway reuses the '-rt' Name tag, exactly as
        # the original code did.
        create_tag([ig_id], json.dumps(base_tag))
        create_tag([ig_id], json.dumps(name_tag))
        ec2_client.attach_internet_gateway(InternetGatewayId=ig_id, VpcId=vpc_id)
        ec2_client.create_route(DestinationCidrBlock='0.0.0.0/0', RouteTableId=rt_id, GatewayId=ig_id)
        return rt_id
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with creating AWS Route Table: {}'.format(str(err)))
        raise Exception
+
+
def remove_vpc(vpc_id):
    """Delete the given VPC."""
    try:
        ec2_client.delete_vpc(VpcId=vpc_id)
        print("AWS VPC {} has been removed".format(vpc_id))
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with removing AWS VPC: {}'.format(str(err)))
        raise Exception
+
+
def create_tag(resource, tag, with_tag_res_id=True):
    """Apply tags to one or more AWS resources.

    :param resource: a resource id, or a list of resource ids
    :param tag: a {'Key': .., 'Value': ..} dict, or the same as a JSON string
    :param with_tag_res_id: when True, additionally apply the resource-id
        tag and the billing tag derived from the command-line arguments
    :raises Exception: when the EC2 CreateTags call fails
    """
    try:
        # Accept the tag either as a dict or as a JSON-encoded string;
        # parse the JSON form only once (the original parsed it twice).
        resource_tag = tag if isinstance(tag, dict) else json.loads(tag)
        resource_name = resource_tag.get('Value')
        if not isinstance(resource, list):
            resource = [resource]
        tags_list = [resource_tag]
        if with_tag_res_id:
            tags_list.append(
                {
                    'Key': args.tag_resource_id,
                    'Value': args.service_base_name + ':' + resource_name
                }
            )
            tags_list.append(
                {
                    'Key': args.billing_tag.split(':')[0],
                    'Value': args.billing_tag.split(':')[1]
                }
            )
        if args.additional_tags:
            # Bug fix: the loop variable used to be named "tag", shadowing
            # the parameter of the same name.
            for extra_tag in args.additional_tags.split(';'):
                tags_list.append(
                    {
                        'Key': extra_tag.split(':')[0],
                        'Value': extra_tag.split(':')[1]
                    }
                )
        ec2_client.create_tags(
            Resources=resource,
            Tags=tags_list
        )
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with setting tag: {}'.format(str(err)))
        raise Exception
+
+
def create_efs_tag():
    """Tag the configured EFS file system with the base and Name tags."""
    try:
        efs_tags = [
            {"Key": tag_name, "Value": args.service_base_name},
            {"Key": "Name", "Value": args.service_base_name + '-efs'},
        ]
        efs_client.create_tags(FileSystemId=args.efs_id, Tags=efs_tags)
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with setting EFS tag: {}'.format(str(err)))
        raise Exception
+
+
def create_subnet(vpc_id, subnet_cidr):
    """Create and tag a subnet inside *vpc_id*.

    :returns: the new subnet id
    """
    try:
        base_tag = {"Key": tag_name, "Value": "{}".format(args.service_base_name)}
        name_tag = {"Key": "Name", "Value": "{}-subnet".format(args.service_base_name)}
        new_subnet = ec2_resource.create_subnet(VpcId=vpc_id, CidrBlock=subnet_cidr)
        for subnet_tag in (base_tag, name_tag):
            create_tag(new_subnet.id, subnet_tag)
        new_subnet.reload()
        print('AWS Subnet has been created')
        return new_subnet.id
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with creating AWS Subnet: {}'.format(str(err)))
        raise Exception
+
+
def remove_subnet():
    """Delete every subnet tagged with the service base name.

    Bug fix: the boto3 collection object is always truthy, so the original
    "no subnets" branch could never run. Materialize the collection into a
    list first (remove_ec2 already does this).
    """
    try:
        subnets = list(ec2_resource.subnets.filter(
            Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': [args.service_base_name]}]))
        if subnets:
            for subnet in subnets:
                ec2_client.delete_subnet(SubnetId=subnet.id)
                print("The AWS subnet {} has been deleted successfully".format(subnet.id))
        else:
            print("There are no private AWS subnets to delete")
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with removing AWS Subnet: {}'.format(str(err)))
        raise Exception
+
+
def get_route_table_by_tag(tag_value):
    """Return the id of the first route table whose service tag equals *tag_value*."""
    try:
        described = ec2_client.describe_route_tables(
            Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': ['{}'.format(tag_value)]}])
        return described.get('RouteTables')[0].get('RouteTableId')
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with getting AWS Route tables: {}'.format(str(err)))
        raise Exception
+
+
def create_security_group(security_group_name, vpc_id, ingress, egress, tag, name_tag):
    """Create a security group and apply the given rules.

    :param security_group_name: name (and description) of the new group
    :param vpc_id: VPC the group belongs to
    :param ingress: list of IpPermissions dicts to authorize inbound
    :param egress: list of IpPermissions dicts to authorize outbound
    :param tag: base tag applied to the group
    :param name_tag: Name tag applied to the group
    :returns: the new security group id
    """
    try:
        # Bug fix: Description was the literal string 'security_group_name'
        # rather than the actual group name.
        group = ec2_resource.create_security_group(GroupName=security_group_name,
                                                   Description=security_group_name,
                                                   VpcId=vpc_id)
        time.sleep(10)  # allow the new group to propagate before tagging
        create_tag(group.id, tag)
        create_tag(group.id, name_tag)
        try:
            # Remove the default allow-all egress rule before applying ours.
            group.revoke_egress(IpPermissions=[{"IpProtocol": "-1", "IpRanges": [{"CidrIp": "0.0.0.0/0"}],
                                                "UserIdGroupPairs": [], "PrefixListIds": []}])
        except Exception:  # narrowed from a bare except
            print("Mentioned rule does not exist")
        for rule in ingress:
            group.authorize_ingress(IpPermissions=[rule])
        for rule in egress:
            group.authorize_egress(IpPermissions=[rule])
        return group.id
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with creating AWS security group: {}'.format(str(err)))
        raise Exception
+
+
def get_vpc_cidr_by_id(vpc_id):
    """Return the list of CIDR blocks of the first matching VPC.

    Returns '' when DescribeVpcs yields no VPCs (same as the original).
    """
    try:
        for vpc in ec2_client.describe_vpcs(VpcIds=[vpc_id]).get('Vpcs'):
            # Only the first VPC is inspected, matching the original
            # return-inside-loop behavior.
            return [assoc.get('CidrBlock') for assoc in vpc.get('CidrBlockAssociationSet')]
        return ''
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with getting AWS VPC CIDR: {}'.format(str(err)))
        raise Exception
+
+
def format_sg(sg_rules):
    """Normalize security-group rules so each entry holds one IP range.

    Rules with several IpRanges are expanded into one rule per range
    (duplicates dropped); rules with no IpRanges are kept unchanged.

    :param sg_rules: list of IpPermissions rule dicts
    :returns: list of normalized rule dicts
    """
    try:
        formatted_sg_rules = list()
        for rule in sg_rules:
            if not rule['IpRanges']:
                formatted_sg_rules.append(rule)
                continue
            for ip_range in rule['IpRanges']:
                # Copy the rule, narrowing it to a single IP range.
                formatted_rule = {key: ([ip_range] if key == 'IpRanges' else value)
                                  for key, value in rule.items()}
                if formatted_rule not in formatted_sg_rules:
                    formatted_sg_rules.append(formatted_rule)
        return formatted_sg_rules
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        # Typo fix: "formating" -> "formatting"
        print('Error with formatting AWS SG rules: {}'.format(str(err)))
        raise Exception
+
+
def remove_sgroups():
    """Delete every security group tagged with the service base name.

    Bug fix: the boto3 collection object is always truthy, so the original
    "nothing to delete" branch could never run. Materialize the collection
    into a list first (remove_ec2 already does this).
    """
    try:
        sgs = list(ec2_resource.security_groups.filter(
            Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': [args.service_base_name]}]))
        if sgs:
            for sg in sgs:
                ec2_client.delete_security_group(GroupId=sg.id)
                print("The AWS security group {} has been deleted successfully".format(sg.id))
        else:
            print("There are no AWS security groups to delete")
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with removing AWS SG: {}'.format(str(err)))
        raise Exception
+
+
def create_instance():
    """Launch the repository EC2 instance and tag it and its volumes.

    :returns: the instance id, or '' when no instance was launched
    """
    try:
        image_id = get_ami_id('ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20160907.1')
        block_devices = [
            {
                "DeviceName": "/dev/sda1",
                "Ebs": {"VolumeSize": int(args.primary_disk_size)}
            },
            {
                "DeviceName": "/dev/sdb",
                "Ebs": {"VolumeSize": int(args.additional_disk_size)}
            },
        ]
        launched = ec2_resource.create_instances(ImageId=image_id,
                                                 MinCount=1,
                                                 MaxCount=1,
                                                 BlockDeviceMappings=block_devices,
                                                 KeyName=args.key_name,
                                                 SecurityGroupIds=[args.sg_id],
                                                 InstanceType=args.instance_type,
                                                 SubnetId=args.subnet_id,
                                                 UserData='')
        for instance in launched:
            print("Waiting for instance {} become running.".format(instance.id))
            instance.wait_until_running()
            name_tag = {'Key': 'Name', 'Value': args.service_base_name + '-repository'}
            base_tag = {"Key": tag_name, "Value": args.service_base_name}
            create_tag(instance.id, name_tag)
            create_tag(instance.id, base_tag)
            tag_intance_volume(instance.id, args.service_base_name + '-repository', base_tag)
            return instance.id
        return ''
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with creating AWS EC2 instance: {}'.format(str(err)))
        raise Exception
+
+
def tag_intance_volume(instance_id, node_name, instance_tag):
    """Tag the instance's EBS volumes as primary/secondary.

    The second volume in the block-device mapping is labeled secondary,
    every other one primary (matching the original counter logic).

    :param instance_id: id of the EC2 instance whose volumes are tagged
    :param node_name: base for the volumes' Name tags
    :param instance_tag: base {'Key': .., 'Value': ..} tag of the instance;
        bug fix: the original aliased this dict and rewrote its 'Value',
        mutating the caller's tag — a copy is used instead
    """
    try:
        volume_list = get_instance_attr(instance_id, 'block_device_mappings')
        instance_tag_value = instance_tag.get('Value')
        for counter, volume in enumerate(volume_list):
            volume_postfix = '-volume-secondary' if counter == 1 else '-volume-primary'
            name_tag = {'Key': 'Name',
                        'Value': node_name + volume_postfix}
            volume_tag = dict(instance_tag)  # copy — do not mutate the caller's dict
            volume_tag['Value'] = instance_tag_value + volume_postfix
            volume_id = volume.get('Ebs').get('VolumeId')
            create_tag(volume_id, name_tag)
            create_tag(volume_id, volume_tag)
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with tagging AWS EC2 instance volumes: {}'.format(str(err)))
        raise Exception
+
+
def get_instance_attr(instance_id, attribute_name):
    """Return *attribute_name* of the running instance, or '' if none matches."""
    try:
        running = ec2_resource.instances.filter(
            Filters=[{'Name': 'instance-id', 'Values': [instance_id]},
                     {'Name': 'instance-state-name', 'Values': ['running']}])
        for instance in running:
            return getattr(instance, attribute_name)
        return ''
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with getting AWS EC2 instance attributes: {}'.format(str(err)))
        raise Exception
+
+
def get_ami_id(ami_name):
    """Resolve an AMI name to its image id.

    Looks for available x86_64 HVM EBS images rooted at /dev/sda1; when
    several match, the last one listed wins (as in the original).

    :raises Exception: when no image matches
    """
    try:
        ami_filters = [
            {'Name': 'name', 'Values': [ami_name]},
            {'Name': 'virtualization-type', 'Values': ['hvm']},
            {'Name': 'state', 'Values': ['available']},
            {'Name': 'root-device-name', 'Values': ['/dev/sda1']},
            {'Name': 'root-device-type', 'Values': ['ebs']},
            {'Name': 'architecture', 'Values': ['x86_64']},
        ]
        images = ec2_client.describe_images(Filters=ami_filters).get('Images')
        image_id = ''
        for image in images:
            image_id = image.get('ImageId')
        if image_id == '':
            raise Exception("Unable to find AWS AMI id with name: " + ami_name)
        return image_id
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with getting AWS AMI ID: {}'.format(str(err)))
        raise Exception
+
+
def remove_route_tables():
    """Delete all route tables carrying the service tag key.

    Each table's subnet associations are removed before the table itself.
    """
    try:
        found = ec2_client.describe_route_tables(
            Filters=[{'Name': 'tag-key', 'Values': [tag_name]}]).get('RouteTables')
        for rtable in found:
            if not rtable:
                print("There are no AWS route tables to remove")
                continue
            for association in rtable.get('Associations'):
                association_id = association.get('RouteTableAssociationId')
                ec2_client.disassociate_route_table(AssociationId=association_id)
                print("Association {} has been removed".format(association_id))
            rtable_id = rtable.get('RouteTableId')
            ec2_client.delete_route_table(RouteTableId=rtable_id)
            print("AWS Route table {} has been removed".format(rtable_id))
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with removing AWS Route Tables: {}'.format(str(err)))
        raise Exception
+
+
def remove_ec2():
    """Terminate every tagged EC2 instance and wait for termination."""
    try:
        matched = list(ec2_resource.instances.filter(
            Filters=[{'Name': 'instance-state-name', 'Values': ['running', 'stopped', 'pending', 'stopping']},
                     {'Name': 'tag:{}'.format(tag_name), 'Values': ['{}'.format(args.service_base_name)]}]))
        if not matched:
            print("There are no instances with '{}' tag to terminate".format(tag_name))
            return
        for instance in matched:
            ec2_client.terminate_instances(InstanceIds=[instance.id])
            waiter = ec2_client.get_waiter('instance_terminated')
            waiter.wait(InstanceIds=[instance.id])
            print("The instance {} has been terminated successfully".format(instance.id))
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with removing EC2 instances: {}'.format(str(err)))
        raise Exception
+
+
def remove_internet_gateways(vpc_id, tag_value):
    """Detach and delete the tagged internet gateway of *vpc_id*."""
    try:
        ig_id = ''
        gateways = ec2_client.describe_internet_gateways(
            Filters=[
                {'Name': 'tag-key', 'Values': [tag_name]},
                {'Name': 'tag-value', 'Values': [tag_value]}]).get('InternetGateways')
        for gateway in gateways:
            ig_id = gateway.get('InternetGatewayId')
        # NOTE(review): if no gateway matched, ig_id is '' and the detach
        # call below fails — same behavior as the original; confirm intended.
        ec2_client.detach_internet_gateway(InternetGatewayId=ig_id, VpcId=vpc_id)
        print("AWS Internet gateway {0} has been detached from VPC {1}".format(ig_id, vpc_id))
        ec2_client.delete_internet_gateway(InternetGatewayId=ig_id)
        print("AWS Internet gateway {} has been deleted successfully".format(ig_id))
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with removing AWS Internet gateways: {}'.format(str(err)))
        raise Exception
+
+
def enable_auto_assign_ip(subnet_id):
    """Enable automatic public IP assignment on the given subnet."""
    try:
        ec2_client.modify_subnet_attribute(MapPublicIpOnLaunch={'Value': True},
                                           SubnetId=subnet_id)
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with enabling auto-assign of public IP addresses: {}'.format(str(err)))
        raise Exception
+
+
def subnet_exist(return_id=False):
    """Check whether the service subnet exists.

    When a VPC id was supplied on the command line the search is
    narrowed to that VPC.

    :returns: the subnet id when *return_id* is True, otherwise a boolean
    """
    try:
        filters = [{'Name': 'tag-key', 'Values': [tag_name]},
                   {'Name': 'tag-value', 'Values': [args.service_base_name]}]
        if args.vpc_id:
            filters.append({'Name': 'vpc-id', 'Values': [args.vpc_id]})
        found = False
        for subnet in ec2_resource.subnets.filter(Filters=filters):
            if return_id:
                return subnet.id
            found = True
        return found
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with getting AWS Subnet: {}'.format(str(err)))
        raise Exception
+
+
def sg_exist(return_id=False):
    """Check whether the service security group exists.

    :returns: the group id when *return_id* is True, otherwise a boolean
    """
    try:
        found = False
        name_filter = [{'Name': 'group-name', 'Values': [args.service_base_name + "-sg"]}]
        for security_group in ec2_resource.security_groups.filter(Filters=name_filter):
            if return_id:
                return security_group.id
            found = True
        return found
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with getting AWS Security group: {}'.format(str(err)))
        raise Exception
+
+
def ec2_exist(return_id=False):
    """Check whether the repository instance exists (in any live state).

    :returns: the instance id when *return_id* is True, otherwise a boolean
    """
    try:
        found = False
        matched = ec2_resource.instances.filter(
            Filters=[{'Name': 'tag:Name', 'Values': [args.service_base_name + '-repository']},
                     {'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']}])
        for instance in matched:
            if return_id:
                return instance.id
            found = True
        return found
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with getting AWS EC2 instance: {}'.format(str(err)))
        raise Exception
+
+
def allocate_elastic_ip():
    """Allocate and tag a VPC Elastic IP.

    :returns: the allocation id
    """
    try:
        base_tag = {"Key": tag_name, "Value": "{}".format(args.service_base_name)}
        name_tag = {"Key": "Name", "Value": "{}-eip".format(args.service_base_name)}
        allocation_id = ec2_client.allocate_address(Domain='vpc').get('AllocationId')
        for eip_tag in (base_tag, name_tag):
            create_tag(allocation_id, eip_tag)
        print('AWS Elastic IP address has been allocated')
        return allocation_id
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with creating AWS Elastic IP: {}'.format(str(err)))
        raise Exception
+
+
def release_elastic_ip():
    """Release the tagged Elastic IP (looked up by the service tag)."""
    try:
        ec2_client.release_address(AllocationId=elastic_ip_exist(True))
        print("AWS Elastic IP address has been released.")
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with removing AWS Elastic IP: {}'.format(str(err)))
        raise Exception
+
+
def associate_elastic_ip(instance_id, allocation_id):
    """Associate an allocated Elastic IP with the given instance."""
    try:
        ec2_client.associate_address(InstanceId=instance_id,
                                     AllocationId=allocation_id).get('AssociationId')
        print("AWS Elastic IP address has been associated.")
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with associating AWS Elastic IP: {}'.format(str(err)))
        raise Exception
+
+
def disassociate_elastic_ip(association_id):
    """Disassociate an Elastic IP by its association id."""
    try:
        ec2_client.disassociate_address(AssociationId=association_id)
        print("AWS Elastic IP address has been disassociated.")
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with disassociating AWS Elastic IP: {}'.format(str(err)))
        raise Exception
+
+
def elastic_ip_exist(return_id=False, return_parameter='AllocationId'):
    """Check whether the tagged Elastic IP exists.

    :param return_id: when True, return *return_parameter* of the first match
    :param return_parameter: which address field to return (default AllocationId)
    :returns: the requested field, or a boolean when *return_id* is False
    """
    try:
        found = False
        addresses = ec2_client.describe_addresses(
            Filters=[
                {'Name': 'tag-key', 'Values': [tag_name]},
                {'Name': 'tag-value', 'Values': [args.service_base_name]}
            ]
        ).get('Addresses')
        for address in addresses:
            if return_id:
                return address.get(return_parameter)
            found = True
        return found
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with getting AWS Elastic IP: {}'.format(str(err)))
        raise Exception
+
+
def create_route_53_record(hosted_zone_id, hosted_zone_name, subdomain, ip_address):
    """Create an A record mapping subdomain.hosted_zone_name to *ip_address*."""
    try:
        record_set = {
            'Name': "{}.{}".format(subdomain, hosted_zone_name),
            'Type': 'A',
            'TTL': 300,
            'ResourceRecords': [{'Value': ip_address}]
        }
        route53_client.change_resource_record_sets(
            HostedZoneId=hosted_zone_id,
            ChangeBatch={
                'Changes': [{'Action': 'CREATE', 'ResourceRecordSet': record_set}]
            }
        )
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with creating AWS Route53 record: {}'.format(str(err)))
        raise Exception
+
+
def remove_route_53_record(hosted_zone_id, hosted_zone_name, subdomain):
    """Delete the A record(s) for subdomain.hosted_zone_name in the zone.

    One DELETE change is issued per resource record, mirroring the
    original behavior.
    """
    try:
        full_name = "{}.{}.".format(subdomain, hosted_zone_name)
        record_sets = route53_client.list_resource_record_sets(
            HostedZoneId=hosted_zone_id).get('ResourceRecordSets')
        for record_set in record_sets:
            if record_set['Name'] != full_name:
                continue
            for record in record_set['ResourceRecords']:
                change = {
                    'Action': 'DELETE',
                    'ResourceRecordSet': {
                        'Name': record_set['Name'],
                        'Type': 'A',
                        'TTL': 300,
                        'ResourceRecords': [{'Value': record['Value']}]
                    }
                }
                route53_client.change_resource_record_sets(
                    HostedZoneId=hosted_zone_id,
                    ChangeBatch={'Changes': [change]}
                )
        print("AWS Route53 record has been removed.")
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with removing AWS Route53 record: {}'.format(str(err)))
        raise Exception
+
+
def get_instance_ip_address_by_id(instance_id, ip_address_type):
    """Return the requested IP attribute of a running instance.

    Implicitly returns None when no running instance matches (as the
    original did).
    """
    try:
        running = ec2_resource.instances.filter(
            Filters=[{'Name': 'instance-id', 'Values': [instance_id]},
                     {'Name': 'instance-state-name', 'Values': ['running']}])
        for instance in running:
            return getattr(instance, ip_address_type)
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with getting AWS EC2 instance IP address: {}'.format(str(err)))
        raise Exception
+
+
def create_efs():
    """Create an encrypted, general-purpose EFS file system and block until it
    reaches the 'available' lifecycle state. Returns the file system id."""
    try:
        creation_token = id_generator(10, False)
        response = efs_client.create_file_system(
            CreationToken=creation_token,
            PerformanceMode='generalPurpose',
            Encrypted=True
        )
        file_system_id = response.get('FileSystemId')
        while True:
            state = efs_client.describe_file_systems(
                FileSystemId=file_system_id).get('FileSystems')[0].get('LifeCycleState')
            if state == 'available':
                break
            time.sleep(5)
        return file_system_id
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with creating AWS EFS: {}'.format(str(err)))
        raise Exception
+
+
def create_mount_target(efs_sg_id):
    """Create an EFS mount target in args.subnet_id guarded by the given
    security group, and wait until it becomes 'available'."""
    try:
        response = efs_client.create_mount_target(
            FileSystemId=args.efs_id,
            SubnetId=args.subnet_id,
            SecurityGroups=[efs_sg_id]
        )
        mount_target_id = response.get('MountTargetId')
        while True:
            state = efs_client.describe_mount_targets(
                MountTargetId=mount_target_id).get('MountTargets')[0].get('LifeCycleState')
            if state == 'available':
                break
            time.sleep(10)
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with creating AWS mount target: {}'.format(str(err)))
        raise Exception
+
+
def efs_exist(return_id=False):
    """Check whether the service EFS ('<service_base_name>-efs') exists.

    Returns the file system id when return_id is True and a match is found,
    otherwise a boolean existence flag.
    """
    try:
        target_name = args.service_base_name + '-efs'
        found = False
        for file_system in efs_client.describe_file_systems().get('FileSystems'):
            if file_system.get('Name') == target_name:
                if return_id:
                    return file_system.get('FileSystemId')
                found = True
        return found
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with getting AWS EFS: {}'.format(str(err)))
        raise Exception
+
+
def remove_efs():
    """Tear down the service EFS: delete all mount targets, wait for them to
    detach, delete the file system, and wait until it is gone."""
    try:
        file_system_id = efs_exist(True)
        targets = efs_client.describe_mount_targets(
            FileSystemId=file_system_id).get('MountTargets')
        for target in targets:
            efs_client.delete_mount_target(MountTargetId=target.get('MountTargetId'))
        # The file system cannot be deleted while any mount target remains.
        while efs_client.describe_file_systems(
                FileSystemId=file_system_id).get('FileSystems')[0].get('NumberOfMountTargets') != 0:
            time.sleep(5)
        efs_client.delete_file_system(FileSystemId=file_system_id)
        while efs_exist():
            time.sleep(5)
        print('AWS EFS has been deleted')
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with removing AWS EFS: {}'.format(str(err)))
        raise Exception
+
+
def ensure_ssh_user(initial_user):
    """Create the DLab service OS user (passwordless sudo) and copy the
    initial user's authorized_keys to it.

    Idempotent: skipped when /home/<initial_user>/.ssh_user_ensured exists.
    Raises on any remote command failure.
    """
    try:
        if not exists('/home/{}/.ssh_user_ensured'.format(initial_user)):
            sudo('useradd -m -G sudo -s /bin/bash {0}'.format(configuration['conf_os_user']))
            sudo('echo "{} ALL = NOPASSWD:ALL" >> /etc/sudoers'.format(configuration['conf_os_user']))
            sudo('mkdir /home/{}/.ssh'.format(configuration['conf_os_user']))
            # Temporarily give the initial user ownership of the new .ssh dir
            # so its authorized_keys can be copied in on the next line.
            sudo('chown -R {0}:{0} /home/{1}/.ssh/'.format(initial_user, configuration['conf_os_user']))
            sudo('cat /home/{0}/.ssh/authorized_keys > /home/{1}/.ssh/authorized_keys'.format(
                initial_user, configuration['conf_os_user']))
            # Hand ownership back to the service user and lock down permissions.
            sudo('chown -R {0}:{0} /home/{0}/.ssh/'.format(configuration['conf_os_user']))
            sudo('chmod 700 /home/{0}/.ssh'.format(configuration['conf_os_user']))
            sudo('chmod 600 /home/{0}/.ssh/authorized_keys'.format(configuration['conf_os_user']))
            # .ensure_dir holds the per-step idempotency marker files used below.
            sudo('mkdir /home/{}/.ensure_dir'.format(configuration['conf_os_user']))
            sudo('touch /home/{}/.ssh_user_ensured'.format(initial_user))
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with creating dlab-user: {}'.format(str(err)))
        raise Exception
+
+
def install_java():
    """Install the distribution default JDK; idempotent via marker file."""
    try:
        marker = '/home/{}/.ensure_dir/java_ensured'.format(configuration['conf_os_user'])
        if exists(marker):
            return
        sudo('apt-get update')
        sudo('apt-get install -y default-jdk ')
        sudo('touch {}'.format(marker))
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with installing Java: {}'.format(str(err)))
        raise Exception
+
+
def install_groovy():
    """Install Apache Groovy (version from module-level groovy_version) under
    /usr/local/groovy and symlink /usr/local/groovy/latest to it.

    Idempotent via the groovy_ensured marker file. Raises on failure.
    """
    try:
        if not exists('/home/{}/.ensure_dir/groovy_ensured'.format(configuration['conf_os_user'])):
            sudo('apt-get install -y unzip')
            sudo('mkdir /usr/local/groovy')
            sudo('wget https://bintray.com/artifact/download/groovy/maven/apache-groovy-binary-{0}.zip -O \
                  /tmp/apache-groovy-binary-{0}.zip'.format(groovy_version))
            sudo('unzip /tmp/apache-groovy-binary-{}.zip -d \
                  /usr/local/groovy'.format(groovy_version))
            # 'latest' symlink lets later steps invoke groovy without knowing the version.
            sudo('ln -s /usr/local/groovy/groovy-{} \
                  /usr/local/groovy/latest'.format(groovy_version))
            sudo('touch /home/{}/.ensure_dir/groovy_ensured'.format(configuration['conf_os_user']))
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with installing Groovy: {}'.format(str(err)))
        raise Exception
+
+
def nexus_service_waiter():
    """Poll port 8443 on localhost (via nmap) until Nexus starts answering.

    Tries every 5 seconds, up to 200 attempts; exits the process with status 1
    if Nexus never comes up.
    """
    attempts = 0
    started = False
    with hide('running'):
        while attempts < 200 and not started:
            print('Waiting nexus to be started...')
            time.sleep(5)
            # echo $? yields '1' when grep found no 'closed' line, i.e. the port is open.
            check = sudo('nmap -p 8443 localhost | grep closed > /dev/null ; echo $?')
            if check[:1] == '1':
                started = True
            else:
                attempts += 1
    # Loop only exits unstarted after exhausting all attempts.
    if not started:
        print('Error: Unable to start Nexus. Aborting...')
        sys.exit(1)
+
+    
def install_nexus():
    """Install and configure Sonatype Nexus 3 as the DLab package repository.

    Steps: mount storage (local disk or EFS), unpack Nexus, wire HTTPS and the
    systemd unit, run the configureNexus Groovy script, build and register the
    community APT and R repository plugins, add custom repositories, and set up
    a daily cron refresh of the Amazon Linux mirror URLs.
    Idempotent via the nexus_ensured marker file. Raises on failure.
    """
    try:
        if not exists('/home/{}/.ensure_dir/nexus_ensured'.format(configuration['conf_os_user'])):
            # Storage for /opt/sonatype-work: extra EBS volume or shared EFS.
            if args.efs_enabled == 'False':
                mounting_disks()
            else:
                mount_efs()
            sudo('apt-get install -y maven nmap python-pip')
            sudo('pip2 install -UI pip')
            sudo('pip2 install -U fabric==1.14.0')
            sudo('mkdir -p /opt/nexus')
            sudo('wget https://sonatype-download.global.ssl.fastly.net/nexus/{0}/nexus-{1}-unix.tar.gz -O \
                  /opt/nexus-{1}-unix.tar.gz'.format(
                  nexus_version.split('.')[0], nexus_version))
            sudo('tar -zhxvf /opt/nexus-{}-unix.tar.gz -C /opt/'.format(
                  nexus_version))
            sudo('mv /opt/nexus-{}/* /opt/nexus/'.format(nexus_version))
            # Also move dotfiles, which the previous glob does not match.
            sudo('mv /opt/nexus-{}/.[!.]* /opt/nexus/'.format(
                  nexus_version))
            sudo('rm -rf /opt/nexus-{}'.format(nexus_version))
            sudo('useradd nexus')
            sudo('echo \"run_as_user="nexus"\" > /opt/nexus/bin/nexus.rc')
            # HTTPS: keystore from the self-signed cert, jetty config with its password.
            create_keystore()
            put('templates/jetty-https.xml', '/tmp/jetty-https.xml')
            sudo('sed -i "s/KEYSTORE_PASSWORD/{}/g" /tmp/jetty-https.xml'.format(keystore_pass))
            sudo('cp -f /tmp/jetty-https.xml /opt/nexus/etc/jetty/')
            put('templates/nexus.service', '/tmp/nexus.service')
            # When EFS is used, the unit must start after the EFS mount service.
            if args.efs_enabled == 'False':
                sudo('sed -i "s|EFS_SERVICE||g" /tmp/nexus.service')
            else:
                sudo('sed -i "s|EFS_SERVICE|mount-efs-sequentially.service|g" /tmp/nexus.service')
            sudo('cp /tmp/nexus.service /etc/systemd/system/')
            put('files/nexus.properties', '/tmp/nexus.properties')
            sudo('mkdir -p /opt/sonatype-work/nexus3/etc')
            sudo('cp -f /tmp/nexus.properties /opt/sonatype-work/nexus3/etc/nexus.properties')
            sudo('chown -R nexus:nexus /opt/nexus /opt/sonatype-work')
            sudo('systemctl daemon-reload')
            sudo('systemctl start nexus')
            nexus_service_waiter()
            sudo('systemctl enable nexus')
            # Render configureNexus.groovy with credentials and Amazon mirror URLs.
            put('templates/configureNexus.groovy', '/tmp/configureNexus.groovy')
            sudo('sed -i "s/REGION/{}/g" /tmp/configureNexus.groovy'.format(args.region))
            sudo('sed -i "s/ADMIN_PASSWORD/{}/g" /tmp/configureNexus.groovy'.format(args.nexus_admin_password))
            sudo('sed -i "s/SERVICE_USER_NAME/{}/g" /tmp/configureNexus.groovy'.format(args.nexus_service_user_name))
            sudo('sed -i "s/SERVICE_USER_PASSWORD/{}/g" /tmp/configureNexus.groovy'.format(
                args.nexus_service_user_password))
            sudo('wget http://repo.{}.amazonaws.com/2017.09/main/mirror.list -O /tmp/main_mirror.list'.format(
                args.region))
            sudo('wget http://repo.{}.amazonaws.com/2017.09/updates/mirror.list -O /tmp/updates_mirror.list'.format(
                args.region))
            amazon_main_repo = sudo("cat /tmp/main_mirror.list  | grep {} | sed 's/$basearch//g'".format(args.region))
            amazon_updates_repo = sudo("cat /tmp/updates_mirror.list  | grep {} | sed 's/$basearch//g'".format(
                args.region))
            sudo('sed -i "s|AMAZON_MAIN_URL|{}|g" /tmp/configureNexus.groovy'.format(amazon_main_repo))
            sudo('sed -i "s|AMAZON_UPDATES_URL|{}|g" /tmp/configureNexus.groovy'.format(amazon_updates_repo))
            sudo('rm -f /tmp/main_mirror.list')
            sudo('rm -f /tmp/updates_mirror.list')
            put('scripts/addUpdateScript.groovy', '/tmp/addUpdateScript.groovy')
            # Retry until the Nexus scripting API accepts the upload (API may lag startup).
            script_executed = False
            while not script_executed:
                try:
                    sudo('/usr/local/groovy/latest/bin/groovy /tmp/addUpdateScript.groovy -u "admin" -p "admin123" \
                          -n "configureNexus" -f "/tmp/configureNexus.groovy" -h "http://localhost:8081"')
                    script_executed = True
                except:
                    time.sleep(10)
                    pass
            sudo('curl -u admin:admin123 -X POST --header \'Content-Type: text/plain\' \
                   http://localhost:8081/service/rest/v1/script/configureNexus/run')
            sudo('systemctl stop nexus')
            # Build the community APT plugin and register it in the Karaf feature files.
            sudo('git clone https://github.com/sonatype-nexus-community/nexus-repository-apt')
            with cd('nexus-repository-apt'):
                sudo('mvn')
            apt_plugin_version = sudo('find nexus-repository-apt/ -name "nexus-repository-apt-*.jar" '
                                      '-printf "%f\\n" | grep -v "sources"').replace('nexus-repository-apt-',
                                                                                     '').replace('.jar', '')
            compress_plugin_version = sudo('find /opt/nexus/ -name "commons-compress-*.jar" '
                                           '-printf "%f\\n" ').replace('commons-compress-', '').replace('.jar', '')
            xz_plugin_version = sudo('find /opt/nexus/ -name "xz-*.jar" '
                                     '-printf "%f\\n" ').replace('xz-', '').replace('.jar', '')
            sudo('mkdir -p /opt/nexus/system/net/staticsnow/nexus-repository-apt/{0}/'.format(apt_plugin_version))
            apt_plugin_jar_path = sudo('find nexus-repository-apt/ -name "nexus-repository-apt-{0}.jar"'.format(
                apt_plugin_version))
            sudo('cp -f {0} /opt/nexus/system/net/staticsnow/nexus-repository-apt/{1}/'.format(
                apt_plugin_jar_path, apt_plugin_version
            ))
            # Rewrite nexus-core-feature XML: drop the closing tag, append the
            # apt feature definition, then restore the closing tags.
            sudo('sed -i "$ d" /opt/nexus/system/org/sonatype/nexus/assemblies/nexus-core-feature/{0}/'
                 'nexus-core-feature-{0}-features.xml'.format(nexus_version))
            sudo('''echo '<feature name="nexus-repository-apt" description="net.staticsnow:nexus-repository-apt" '''
                 '''version="{1}">' >> /opt/nexus/system/org/sonatype/nexus/assemblies/nexus-core-feature/{0}/'''
                 '''nexus-core-feature-{0}-features.xml'''.format(nexus_version, apt_plugin_version))
            sudo('''echo '<details>net.staticsnow:nexus-repository-apt</details>' >> '''
                 '''/opt/nexus/system/org/sonatype/nexus/assemblies/nexus-core-feature/{0}/'''
                 '''nexus-core-feature-{0}-features.xml'''.format(nexus_version))
            sudo('''echo '<bundle>mvn:net.staticsnow/nexus-repository-apt/{1}</bundle>' >> '''
                 '''/opt/nexus/system/org/sonatype/nexus/assemblies/nexus-core-feature/{0}/'''
                 '''nexus-core-feature-{0}-features.xml'''.format(nexus_version, apt_plugin_version))
            sudo('''echo '<bundle>mvn:org.apache.commons/commons-compress/{1}</bundle>' >> '''
                 '''/opt/nexus/system/org/sonatype/nexus/assemblies/nexus-core-feature/{0}/'''
                 '''nexus-core-feature-{0}-features.xml'''.format(nexus_version, compress_plugin_version))
            sudo('''echo '<bundle>mvn:org.tukaani/xz/{1}</bundle>' >> '''
                 '''/opt/nexus/system/org/sonatype/nexus/assemblies/nexus-core-feature/{0}/'''
                 '''nexus-core-feature-{0}-features.xml'''.format(nexus_version, xz_plugin_version))
            sudo('''echo '</feature>' >> '''
                 '''/opt/nexus/system/org/sonatype/nexus/assemblies/nexus-core-feature/{0}/'''
                 '''nexus-core-feature-{0}-features.xml'''.format(nexus_version))
            sudo('''echo '</features>' >> '''
                 '''/opt/nexus/system/org/sonatype/nexus/assemblies/nexus-core-feature/{0}/'''
                 '''nexus-core-feature-{0}-features.xml'''.format(nexus_version))
            sudo('''sed -i 's|<feature prerequisite=\"true\" dependency=\"false\">wrap</feature>|'''
                 '''<feature prerequisite=\"true\" dependency=\"false\">wrap</feature>\\n'''
                 '''<feature prerequisite=\"false\" dependency=\"false\">nexus-repository-apt</feature>|g' '''
                 '''/opt/nexus/system/org/sonatype/nexus/assemblies/nexus-core-feature/{0}/nexus-core-feature-'''
                 '''{0}-features.xml'''.format(nexus_version))
            # Same procedure for the community R plugin against the nexus-oss-feature XML.
            sudo('git clone https://github.com/sonatype-nexus-community/nexus-repository-r.git')
            with cd('nexus-repository-r'):
                sudo('mvn clean install')
            r_plugin_version = sudo('find nexus-repository-r/ -name "nexus-repository-r-*.jar" '
                                    '-printf "%f\\n" | grep -v "sources"').replace('nexus-repository-r-', '').replace(
                '.jar', '')
            sudo('mkdir -p /opt/nexus/system/org/sonatype/nexus/plugins/nexus-repository-r/{}/'.format(
                r_plugin_version))
            r_plugin_jar_path = sudo('find nexus-repository-r/ -name "nexus-repository-r-{0}.jar"'.format(
                r_plugin_version))
            sudo('cp -f {0} /opt/nexus/system/org/sonatype/nexus/plugins/nexus-repository-r/{1}/'.format(
                r_plugin_jar_path, r_plugin_version
            ))
            sudo('sed -i "$ d" /opt/nexus/system/com/sonatype/nexus/assemblies/nexus-oss-feature/{0}/'
                 'nexus-oss-feature-{0}-features.xml'.format(nexus_version))
            sudo('''echo '<feature name="nexus-repository-r" description="org.sonatype.nexus.plugins:'''
                 '''nexus-repository-r" version="{1}">' >> /opt/nexus/system/com/sonatype/nexus/assemblies/'''
                 '''nexus-oss-feature/{0}/nexus-oss-feature-{0}-features.xml'''.format(nexus_version, r_plugin_version))
            sudo('''echo '<details>org.sonatype.nexus.plugins:nexus-repository-r</details>' >> '''
                 '''/opt/nexus/system/com/sonatype/nexus/assemblies/nexus-oss-feature/{0}/'''
                 '''nexus-oss-feature-{0}-features.xml'''.format(nexus_version))
            sudo('''echo '<bundle>mvn:org.sonatype.nexus.plugins/nexus-repository-r/{1}</bundle>' >> '''
                 '''/opt/nexus/system/com/sonatype/nexus/assemblies/nexus-oss-feature/{0}/'''
                 '''nexus-oss-feature-{0}-features.xml'''.format(nexus_version, r_plugin_version))
            sudo('''echo '</feature>' >> '''
                 '''/opt/nexus/system/com/sonatype/nexus/assemblies/nexus-oss-feature/{0}/'''
                 '''nexus-oss-feature-{0}-features.xml'''.format(nexus_version))
            sudo('''echo '</features>' >> '''
                 '''/opt/nexus/system/com/sonatype/nexus/assemblies/nexus-oss-feature/{0}/'''
                 '''nexus-oss-feature-{0}-features.xml'''.format(nexus_version))
            sudo('''sed -i 's|<feature prerequisite=\"true\" dependency=\"false\">wrap</feature>|'''
                 '''<feature prerequisite=\"true\" dependency=\"false\">wrap</feature>\\n'''
                 '''<feature version=\"{1}\" prerequisite=\"false\" dependency=\"false\">'''
                 '''nexus-repository-r</feature>|g' '''
                 '''/opt/nexus/system/com/sonatype/nexus/assemblies/nexus-oss-feature/{0}/'''
                 '''nexus-oss-feature-{0}-features.xml'''.format(nexus_version, r_plugin_version))
            sudo('chown -R nexus:nexus /opt/nexus')
            sudo('systemctl start nexus')
            nexus_service_waiter()
            # Register and run the custom-repository script (admin password was
            # changed from the default by configureNexus above).
            put('templates/addCustomRepository.groovy', '/tmp/addCustomRepository.groovy')
            sudo('sed -i "s|REGION|{0}|g" /tmp/addCustomRepository.groovy'.format(args.region))
            script_executed = False
            while not script_executed:
                try:
                    sudo('/usr/local/groovy/latest/bin/groovy /tmp/addUpdateScript.groovy -u "admin" -p "{}" '
                         '-n "addCustomRepository" -f "/tmp/addCustomRepository.groovy" -h '
                         '"http://localhost:8081"'.format(args.nexus_admin_password))
                    script_executed = True
                except:
                    time.sleep(10)
                    pass
            sudo('curl -u admin:{} -X POST --header \'Content-Type: text/plain\' '
                 'http://localhost:8081/service/rest/v1/script/addCustomRepository/run'.format(
                  args.nexus_admin_password))
            sudo('echo "admin:{}" > /opt/nexus/credentials'.format(args.nexus_admin_password))
            sudo('echo "{0}:{1}" >> /opt/nexus/credentials'.format(args.nexus_service_user_name,
                                                                   args.nexus_service_user_password))
            # Daily cron job keeps the Amazon Linux mirror URLs fresh.
            put('templates/updateRepositories.groovy', '/opt/nexus/updateRepositories.groovy', use_sudo=True)
            put('scripts/update_amazon_repositories.py', '/opt/nexus/update_amazon_repositories.py', use_sudo=True)
            sudo('sed -i "s|NEXUS_PASSWORD|{}|g" /opt/nexus/update_amazon_repositories.py'.format(
                 args.nexus_admin_password))
            sudo('touch /var/log/amazon_repo_update.log')
            sudo('echo "0 0 * * * root /usr/bin/python /opt/nexus/update_amazon_repositories.py --region {} >> '
                 '/var/log/amazon_repo_update.log" >> /etc/crontab'.format(args.region))
            sudo('touch /home/{}/.ensure_dir/nexus_ensured'.format(configuration['conf_os_user']))
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with installing Nexus: {}'.format(str(err)))
        raise Exception
+
+
def install_nginx():
    """Install Nginx and deploy the Nexus reverse-proxy virtual host.

    Substitutes the server name into templates/nexus.conf -- the Route53
    subdomain/zone when all three DNS arguments are given, otherwise the
    instance hostname -- plus the region, then enables the service.
    Idempotent via the nginx_ensured marker file. Raises on failure.
    """
    try:
        if not exists('/home/{}/.ensure_dir/nginx_ensured'.format(configuration['conf_os_user'])):
            hostname = sudo('hostname')
            sudo('apt-get install -y nginx')
            sudo('rm -f /etc/nginx/conf.d/* /etc/nginx/sites-enabled/default')
            put('templates/nexus.conf', '/tmp/nexus.conf')
            if args.hosted_zone_id and args.hosted_zone_name and args.subdomain:
                sudo('sed -i "s|SUBDOMAIN|{}|g" /tmp/nexus.conf'.format(args.subdomain))
                sudo('sed -i "s|HOSTZONE|{}|g" /tmp/nexus.conf'.format(args.hosted_zone_name))
            else:
                sudo('sed -i "s|SUBDOMAIN.HOSTZONE|{}|g" /tmp/nexus.conf'.format(hostname))
            sudo('sed -i "s|REGION|{}|g" /tmp/nexus.conf'.format(args.region))
            # Fix: dropped a leftover .format(args.subdomain, args.hosted_zone_name)
            # on this command -- the string has no placeholders, so the arguments
            # were silently ignored.
            sudo('cp /tmp/nexus.conf /etc/nginx/conf.d/nexus.conf')
            sudo('systemctl restart nginx')
            sudo('systemctl enable nginx')
            sudo('touch /home/{}/.ensure_dir/nginx_ensured'.format(configuration['conf_os_user']))
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Error with installing Nginx: {}'.format(str(err)))
        raise Exception
+
+
def mounting_disks():
    """Partition, format (ext4) and mount the additional block device at
    /opt/sonatype-work, persisting the mount in /etc/fstab.

    Picks the last disk reported by lsblk -- assumed to be the extra data
    volume (TODO confirm against instance launch config). Idempotent via the
    additional_disk_mounted marker file. Raises on failure.
    """
    try:
        if not exists('/home/{}/.ensure_dir/additional_disk_mounted'.format(configuration['conf_os_user'])):
            sudo('mkdir -p /opt/sonatype-work')
            disk_name = sudo("lsblk | grep disk | awk '{print $1}' | sort | tail -n 1 | tr '\\n' ',' | sed 's|.$||g'")
            # fdisk script: new DOS label (o), new primary partition 1 spanning
            # the whole disk (n/p/1 with default bounds), write (w).
            sudo('bash -c \'echo -e "o\nn\np\n1\n\n\nw" | fdisk /dev/{}\' '.format(disk_name))
            sudo('sleep 10')
            partition_name = sudo("lsblk -r | grep part | grep {} | awk {} | sort | tail -n 1 | "
                                  "tr '\\n' ',' | sed 's|.$||g'".format(disk_name, "'{print $1}'"))
            sudo('mkfs.ext4 -F -q /dev/{}'.format(partition_name))
            sudo('mount /dev/{0} /opt/sonatype-work'.format(partition_name))
            sudo('bash -c "echo \'/dev/{} /opt/sonatype-work ext4 errors=remount-ro 0 1\' >> /etc/fstab"'.format(
                partition_name))
            sudo('touch /home/{}/.ensure_dir/additional_disk_mounted'.format(configuration['conf_os_user']))
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        print('Failed to mount additional volume: {}'.format(str(err)))
        raise Exception
+
+
def mount_efs():
    """Build amazon-efs-utils from source and mount args.efs_id (TLS) at
    /opt/sonatype-work, persisting the mount via fstab and a systemd unit.

    Certificate hostname/validity checks are disabled in efs-utils.conf
    because the stunnel endpoint is addressed by file-system id, not by a
    name matching the certificate. Idempotent via the efs_mounted marker.
    Exits the process with status 1 on failure.
    """
    try:
        if not exists('/home/{}/.ensure_dir/efs_mounted'.format(configuration['conf_os_user'])):
            sudo('mkdir -p /opt/sonatype-work')
            sudo('apt-get -y install binutils')
            with cd('/tmp/'):
                sudo('git clone https://github.com/aws/efs-utils')
            with cd('/tmp/efs-utils'):
                sudo('./build-deb.sh')
                sudo('apt-get -y install ./build/amazon-efs-utils*deb')
            sudo('sed -i "s/stunnel_check_cert_hostname.*/stunnel_check_cert_hostname = false/g" '
                 '/etc/amazon/efs/efs-utils.conf')
            sudo('sed -i "s/stunnel_check_cert_validity.*/stunnel_check_cert_validity = false/g" '
                 '/etc/amazon/efs/efs-utils.conf')
            sudo('mount -t efs -o tls {}:/ /opt/sonatype-work'.format(
                args.efs_id))
            sudo('bash -c "echo \'{}:/ /opt/sonatype-work efs tls,_netdev 0 0\' >> '
                 '/etc/fstab"'.format(args.efs_id))
            # Systemd unit retries the EFS mount at boot before Nexus starts.
            put('files/mount-efs-sequentially.service', '/tmp/mount-efs-sequentially.service')
            sudo('cp /tmp/mount-efs-sequentially.service /etc/systemd/system/')
            sudo('systemctl daemon-reload')
            sudo('systemctl enable mount-efs-sequentially.service')
            sudo('touch /home/{}/.ensure_dir/efs_mounted'.format(configuration['conf_os_user']))
    except Exception as err:
        traceback.print_exc()
        print('Failed to mount additional volume: ', str(err))
        sys.exit(1)
+
+
def configure_ssl():
    """Generate a self-signed certificate (10 years) for the repository host.

    Adds the private IP -- and the public IP when network_type is 'public' --
    as subjectAltName entries, and generates DH parameters for Nginx.
    Idempotent via the ssl_ensured marker file. Exits with status 1 on failure.
    """
    try:
        if not exists('/home/{}/.ensure_dir/ssl_ensured'.format(configuration['conf_os_user'])):
            hostname = sudo('hostname')
            private_ip = sudo('curl http://169.254.169.254/latest/meta-data/local-ipv4')
            subject_alt_name = 'subjectAltName = IP:{}'.format(private_ip)
            if args.network_type == 'public':
                public_ip = sudo('curl http://169.254.169.254/latest/meta-data/public-ipv4')
                subject_alt_name += ',IP:{}'.format(public_ip)
            sudo('cp /etc/ssl/openssl.cnf /tmp/openssl.cnf')
            sudo('echo "[ subject_alt_name ]" >> /tmp/openssl.cnf')
            sudo('echo "{}" >> /tmp/openssl.cnf'.format(subject_alt_name))
            sudo('openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout /etc/ssl/certs/repository.key '
                 '-out /etc/ssl/certs/repository.crt -subj "/C=US/ST=US/L=US/O=dlab/CN={}" -config '
                 '/tmp/openssl.cnf -extensions subject_alt_name'.format(hostname))
            sudo('openssl dhparam -out /etc/ssl/certs/dhparam.pem 2048')
            sudo('touch /home/{}/.ensure_dir/ssl_ensured'.format(configuration['conf_os_user']))
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        # Fix: error text was copy-pasted from the disk-mount function and
        # misreported the failing step.
        print('Failed to configure SSL certificates: {}'.format(str(err)))
        sys.exit(1)
+
+
def set_hostname():
    """Set the machine hostname: <subdomain>.<hosted_zone_name> when Route53
    arguments are provided, otherwise the EC2 public or private metadata
    hostname depending on network_type.

    Idempotent via the hostname_set marker file. Exits with status 1 on failure.
    """
    try:
        if not exists('/home/{}/.ensure_dir/hostname_set'.format(configuration['conf_os_user'])):
            if args.hosted_zone_id and args.hosted_zone_name and args.subdomain:
                hostname = '{0}.{1}'.format(args.subdomain, args.hosted_zone_name)
            else:
                if args.network_type == 'public':
                    hostname = sudo('curl http://169.254.169.254/latest/meta-data/public-hostname')
                else:
                    hostname = sudo('curl http://169.254.169.254/latest/meta-data/hostname')
            sudo('hostnamectl set-hostname {0}'.format(hostname))
            sudo('touch /home/{}/.ensure_dir/hostname_set'.format(configuration['conf_os_user']))
    except Exception as err:
        traceback.print_exc(file=sys.stdout)
        # Fix: error text was copy-pasted from the disk-mount function and
        # misreported the failing step.
        print('Failed to set hostname: {}'.format(str(err)))
        sys.exit(1)
+
+
def create_keystore():
    """Bundle the self-signed certificate into a PKCS12 file and import it
    into the Nexus JKS keystore, all protected by keystore_pass.

    Idempotent via the keystore_created marker file; exits with status 1 on failure.
    """
    try:
        marker = '/home/{}/.ensure_dir/keystore_created'.format(configuration['conf_os_user'])
        if exists(marker):
            return
        # Wrap cert + key into an intermediate PKCS12 bundle.
        sudo('openssl pkcs12 -export -in /etc/ssl/certs/repository.crt -inkey /etc/ssl/certs/repository.key '
             '-out wildcard.p12 -passout pass:{}'.format(keystore_pass))
        # Import the bundle into the JKS keystore Jetty is configured to read.
        sudo('keytool -importkeystore  -deststorepass {0} -destkeypass {0} -srckeystore wildcard.p12 -srcstoretype '
             'PKCS12 -srcstorepass {0} -destkeystore /opt/nexus/etc/ssl/keystore.jks'.format(keystore_pass))
        sudo('touch {}'.format(marker))
    except Exception as err:
        traceback.print_exc()
        print('Failed to create keystore: ', str(err))
        sys.exit(1)
+
+
+def download_packages():
+    try:
+        if not exists('/home/{}/.ensure_dir/packages_downloaded'.format(configuration['conf_os_user'])):
+            packages_urls = [
+                'https://pkg.jenkins.io/debian/jenkins-ci.org.key',
+                'http://mirrors.sonic.net/apache/maven/maven-{0}/{1}/binaries/apache-maven-{1}-bin.zip'.format(
+                    maven_version.split('.')[0], maven_version),
+                'https://nodejs.org/dist/v8.15.0/node-v8.15.0.tar.gz',
+                'https://github.com/sass/node-sass/releases/download/v4.11.0/linux-x64-57_binding.node',
+                'http://nginx.org/download/nginx-{}.tar.gz'.format(configuration['reverse_proxy_nginx_version']),
+                'http://www.scala-lang.org/files/archive/scala-{}.deb'.format(configuration['notebook_scala_version']),
+                'https://archive.apache.org/dist/spark/spark-{0}/spark-{0}-bin-hadoop{1}.tgz'.format(
+                    configuration['notebook_spark_version'], configuration['notebook_hadoop_version']),
+                'https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-aws/{0}/hadoop-aws-{0}.jar'.format('2.7.4'),
+                'https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk/{0}/aws-java-sdk-{0}.jar'.format('1.7.4'),
+                'https://maven.twttr.com/com/hadoop/gplcompression/hadoop-lzo/{0}/hadoop-lzo-{0}.jar'.format('0.4.20'),
+                'http://central.maven.org/maven2/org/scalanlp/breeze_{0}/{1}/breeze_{0}-{1}.jar'.format('2.11', '0.12'),
+                'http://central.maven.org/maven2/org/scalanlp/breeze-natives_{0}/{1}/breeze-natives_{0}-{1}.jar'.format(
+                    '2.11', '0.12'),
+                'http://central.maven.org/maven2/org/scalanlp/breeze-viz_{0}/{1}/breeze-viz_{0}-{1}.jar'.format(
+                    '2.11', '0.12'),
+                'http://central.maven.org/maven2/org/scalanlp/breeze-macros_{0}/{1}/breeze-macros_{0}-{1}.jar'.format(
+                    '2.11', '0.12'),
+                'http://central.maven.org/maven2/org/scalanlp/breeze-parent_{0}/{1}/breeze-parent_{0}-{1}.jar'.format(
+                    '2.11', '0.12'),
+                'http://central.maven.org/maven2/org/jfree/jfreechart/{0}/jfreechart-{0}.jar'.format('1.0.19'),
+                'http://central.maven.org/maven2/org/jfree/jcommon/{0}/jcommon-{0}.jar'.format('1.0.24'),
+                '--no-check-certificate https://brunelvis.org/jar/spark-kernel-brunel-all-{0}.jar'.format('2.3'),
+                'http://archive.apache.org/dist/incubator/toree/0.2.0-incubating/toree-pip/toree-0.2.0.tar.gz',
+                'https://download2.rstudio.org/rstudio-server-{}-amd64.deb'.format(
+                    configuration['notebook_rstudio_version']),
+                'http://us.download.nvidia.com/XFree86/Linux-x86_64/{0}/NVIDIA-Linux-x86_64-{0}.run'.format(
+                    configuration['notebook_nvidia_version']),
+                'https://developer.nvidia.com/compute/cuda/{0}/prod/local_installers/{1}'.format(
+                    cuda_version_deeplearning, cuda_deeplearingn_file_name),
+                'https://developer.nvidia.com/compute/cuda/{0}/prod/local_installers/{1}'.format(
+                    configuration['notebook_cuda_version'], configuration['notebook_cuda_file_name']),
+                'http://developer.download.nvidia.com/compute/redist/cudnn/v{0}/{1}'.format(
+                    cudnn_version_deeplearning, cudnn_file_name_deeplearning),
+                'http://developer.download.nvidia.com/compute/redist/cudnn/v{0}/{1}'.format(
+                    configuration['notebook_cudnn_version'], configuration['notebook_cudnn_file_name']),
+                'https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-{}-cp27-none-'
+                'linux_x86_64.whl'.format(tensorflow_version_deeplearning),
+                'https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-{}-cp35-cp35m-'
+                'linux_x86_64.whl'.format(tensorflow_version_deeplearning),
+                'https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-{}-cp27-none-'
+                'linux_x86_64.whl'.format(configuration['notebook_tensorflow_version']),
+                'https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-{}-cp35-cp35m-'
+                'linux_x86_64.whl'.format(configuration['notebook_tensorflow_version']),
+                'https://cmake.org/files/v{1}/cmake-{0}.tar.gz'.format(
+                    configuration['notebook_cmake_version'],
+                    configuration['notebook_cmake_version'].split('.')[0] +
+                    "." + configuration['notebook_cmake_version'].split('.')[1]),
+                'https://cntk.ai/PythonWheel/GPU/cntk-{}-cp27-cp27mu-linux_x86_64.whl'.format(
+                    configuration['notebook_cntk_version']),
+                'https://cntk.ai/PythonWheel/GPU/cntk-{}-cp35-cp35m-linux_x86_64.whl'.format(
+                    configuration['notebook_cntk_version']),
+                'https://www.python.org/ftp/python/{0}/Python-{0}.tgz'.format(python3_version),
+                'http://archive.apache.org/dist/zeppelin/zeppelin-{0}/zeppelin-{0}-bin-netinst.tgz'.format(
+                    configuration['notebook_zeppelin_version']),
+                'http://archive.cloudera.com/beta/livy/livy-server-{}.zip'.format(
+                    configuration['notebook_livy_version']),
+                'https://dl.bintray.com/spark-packages/maven/tapanalyticstoolkit/spark-tensorflow-connector/'
+                '1.0.0-s_2.11/spark-tensorflow-connector-1.0.0-s_2.11.jar',
+                'https://archive.apache.org/dist/incubator/toree/0.2.0-incubating/toree/'
+                'toree-0.2.0-incubating-bin.tar.gz',
+                'https://repo1.maven.org/maven2/org/apache/toree/toree-assembly/0.2.0-incubating/'
+                'toree-assembly-0.2.0-incubating.jar',
+                'https://cran.r-project.org/src/contrib/Archive/keras/keras_{}.tar.gz'.format(
+                    configuration['notebook_keras_version'])
+            ]
+            packages_list = list()
+            for package in packages_urls:
+                package_name = package.split('/')[-1]
+                packages_list.append({'url': package, 'name': package_name})
+            run('mkdir packages')
+            with cd('packages'):
+                for package in packages_list:
+                    run('wget {0}'.format(package['url']))
+                    run('curl -v -u admin:{2} -F "raw.directory=/" -F '
+                        '"raw.asset1=@/home/{0}/packages/{1}" '
+                        '-F "raw.asset1.filename={1}"  '
+                        '"http://localhost:8081/service/rest/v1/components?repository=packages"'.format(
+                         configuration['conf_os_user'], package['name'], args.nexus_admin_password))
+            sudo('touch /home/{}/.ensure_dir/packages_downloaded'.format(configuration['conf_os_user']))
+    except Exception as err:
+        traceback.print_exc()
+        print('Failed to download packages: ', str(err))
+        sys.exit(1)
+
+
def install_docker():
    """Install a pinned docker-ce version on the repository EC2 instance.

    Adds Docker's official apt repository and GPG key, installs the version
    pinned in configuration['ssn_docker_version'], lets the service user run
    docker without sudo, and enables the daemon at boot.
    Idempotent: skipped when the marker file already exists.

    Exits the whole script with status 1 on any failure.
    """
    try:
        # Marker file under the service user's home makes re-runs a no-op.
        if not exists('/home/{}/.ensure_dir/docker_installed'.format(configuration['conf_os_user'])):
            sudo('curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -')
            sudo('add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) '
                 'stable"')
            sudo('apt-get update')
            sudo('apt-cache policy docker-ce')
            # Exact version pin; '~ce-0~ubuntu' is the distro suffix of the package version.
            sudo('apt-get install -y docker-ce={}~ce-0~ubuntu'.format(configuration['ssn_docker_version']))
            # Allow the DLab OS user to talk to the docker daemon without sudo.
            sudo('usermod -a -G docker ' + configuration['conf_os_user'])
            sudo('update-rc.d docker defaults')
            sudo('update-rc.d docker enable')
            sudo('touch /home/{}/.ensure_dir/docker_installed'.format(configuration['conf_os_user']))
    except Exception as err:
        traceback.print_exc()
        print('Failed to install docker: ', str(err))
        sys.exit(1)
+
+
def prepare_images():
    """Build the DLab 'pre-base' Docker image and push it to the local Nexus
    docker registry listening on port 8083.

    Uploads files/Dockerfile to the instance, builds it, logs in to the Nexus
    docker repository with the service-user credentials and pushes the tagged
    image. Idempotent: skipped when the marker file already exists.

    Exits the whole script with status 1 on any failure.
    """
    try:
        # Marker file under the service user's home makes re-runs a no-op.
        if not exists('/home/{}/.ensure_dir/images_prepared'.format(configuration['conf_os_user'])):
            put('files/Dockerfile', '/tmp/Dockerfile')
            with cd('/tmp/'):
                sudo('docker build --file Dockerfile -t pre-base .')
            sudo('docker login -u {0} -p {1} localhost:8083'.format(args.nexus_service_user_name,
                                                                    args.nexus_service_user_password))
            sudo('docker tag pre-base localhost:8083/dlab-pre-base')
            sudo('docker push localhost:8083/dlab-pre-base')
            sudo('touch /home/{}/.ensure_dir/images_prepared'.format(configuration['conf_os_user']))
    except Exception as err:
        traceback.print_exc()
        # Fixed copy-pasted message ("download packages") — this function prepares images.
        print('Failed to prepare images: ', str(err))
        sys.exit(1)
+
+
def install_squid():
    """Install and configure the Squid forward proxy on the repository host.

    Installs the squid package, uploads templates/squid.conf and substitutes
    the VPC_CIDRS / ALLOWED_CIDRS placeholders with ACL lines built from the
    VPC CIDR blocks and the --allowed_ip_cidr argument, then enables and
    (re)starts the service. Idempotent: skipped when the marker file exists.

    Exits the whole script with status 1 on any failure.
    """
    try:
        # Marker file under the service user's home makes re-runs a no-op.
        if not exists('/home/{}/.ensure_dir/squid_installed'.format(configuration['conf_os_user'])):
            sudo('apt-get -y install squid')
            put('templates/squid.conf', '/etc/squid/', use_sudo=True)
            # Build one "acl ... src <cidr>" line per CIDR; '\\n' stays escaped
            # so sed (not Python) expands it into a newline in squid.conf.
            replace_string = ''
            for cidr in get_vpc_cidr_by_id(args.vpc_id):
                replace_string += 'acl AWS_VPC_CIDR src {}\\n'.format(cidr)
            sudo('sed -i "s|VPC_CIDRS|{}|g" /etc/squid/squid.conf'.format(replace_string))
            replace_string = ''
            for cidr in args.allowed_ip_cidr.split(','):
                replace_string += 'acl AllowedCIDRS src {}\\n'.format(cidr)
            sudo('sed -i "s|ALLOWED_CIDRS|{}|g" /etc/squid/squid.conf'.format(replace_string))
            sudo('systemctl enable squid')
            sudo('systemctl restart squid')
            sudo('touch /home/{}/.ensure_dir/squid_installed'.format(configuration['conf_os_user']))
    except Exception as err:
        traceback.print_exc()
        # Fixed copy-pasted message ("download packages") — this function installs squid.
        print('Failed to install squid: ', str(err))
        sys.exit(1)
+
+
if __name__ == "__main__":
    # --- AWS client construction -------------------------------------------
    # Use explicit credentials when both key arguments are given; otherwise
    # fall back to boto3's default credential chain (env vars, profile, role).
    if args.aws_access_key and args.aws_secret_access_key:
        ec2_resource = boto3.resource('ec2', region_name=args.region, aws_access_key_id=args.aws_access_key,
                                      aws_secret_access_key=args.aws_secret_access_key)
        ec2_client = boto3.client('ec2', region_name=args.region, aws_access_key_id=args.aws_access_key,
                                  aws_secret_access_key=args.aws_secret_access_key)
        efs_client = boto3.client('efs', region_name=args.region, aws_access_key_id=args.aws_access_key,
                                  aws_secret_access_key=args.aws_secret_access_key)
        route53_client = boto3.client('route53', aws_access_key_id=args.aws_access_key,
                                      aws_secret_access_key=args.aws_secret_access_key)
    else:
        ec2_resource = boto3.resource('ec2', region_name=args.region)
        ec2_client = boto3.client('ec2', region_name=args.region)
        efs_client = boto3.client('efs', region_name=args.region)
        route53_client = boto3.client('route53')
    tag_name = args.service_base_name + '-Tag'
    # "pre_defined_*" flags record whether each resource existed BEFORE this
    # run: only resources created here are rolled back on failure.
    pre_defined_vpc = True
    pre_defined_subnet = True
    pre_defined_sg = True
    pre_defined_efs = True
    # A DLab configuration file is mandatory for every action except terminate.
    if args.action != 'terminate' and args.dlab_conf_file_path == '':
        print('Please provide argument --dlab_conf_file_path ! Aborting... ')
        sys.exit(1)
    # Flatten the ini file into a single dict keyed "<section>_<option>".
    configuration = dict()
    config = SafeConfigParser()
    config.read(args.dlab_conf_file_path)
    for section in config.sections():
        for option in config.options(section):
            varname = "{0}_{1}".format(section, option)
            configuration[varname] = config.get(section, option)
    # Pinned tool/artifact versions used by the provisioning helpers above.
    groovy_version = '2.5.1'
    nexus_version = '3.15.2-01'
    maven_version = '3.5.4'
    cuda_version_deeplearning = '8.0'
    cuda_deeplearingn_file_name = 'cuda_8.0.44_linux-run'
    cudnn_version_deeplearning = '6.0'
    cudnn_file_name_deeplearning = 'cudnn-8.0-linux-x64-v6.0.tgz'
    tensorflow_version_deeplearning = '1.4.0'
    python3_version = '3.4.0'
    # Generate random credentials when none were supplied on the CLI.
    if args.nexus_admin_password == '':
        args.nexus_admin_password = id_generator()
    if args.nexus_service_user_password == '':
        args.nexus_service_user_password = id_generator()
    keystore_pass = id_generator()
    if args.action == 'terminate':
        # --- terminate: tear everything down in reverse dependency order ----
        # Route53 record -> elastic IP -> EC2 -> EFS -> SG -> subnet -> VPC.
        if args.hosted_zone_id and args.hosted_zone_name and args.subdomain:
            remove_route_53_record(args.hosted_zone_id, args.hosted_zone_name, args.subdomain)
        if elastic_ip_exist():
            try:
                association_id = elastic_ip_exist(True, 'AssociationId')
                disassociate_elastic_ip(association_id)
            except:
                # Best-effort: the address may simply not be associated.
                print("AWS Elastic IP address isn't associated with instance or there is an error "
                      "with disassociating it")
            release_elastic_ip()
        if ec2_exist():
            remove_ec2()
        if efs_exist():
            remove_efs()
        if sg_exist():
            remove_sgroups()
        if subnet_exist():
            remove_subnet()
        if vpc_exist():
            args.vpc_id = vpc_exist(True)
            remove_internet_gateways(args.vpc_id, args.service_base_name)
            remove_route_tables()
            remove_vpc(args.vpc_id)
    elif args.action == 'create':
        # --- create: VPC ----------------------------------------------------
        # Reuse an explicitly passed or previously tagged VPC; create otherwise.
        if not args.vpc_id and not vpc_exist():
            try:
                print('[CREATING AWS VPC]')
                args.vpc_id = create_vpc(args.vpc_cidr)
                enable_vpc_dns(args.vpc_id)
                rt_id = create_rt(args.vpc_id)
                pre_defined_vpc = False
            except:
                # Roll back the partially created VPC and abort.
                remove_internet_gateways(args.vpc_id, args.service_base_name)
                remove_route_tables()
                remove_vpc(args.vpc_id)
                sys.exit(1)
        elif not args.vpc_id and vpc_exist():
            args.vpc_id = vpc_exist(True)
            pre_defined_vpc = False
        print('AWS VPC ID: {}'.format(args.vpc_id))
        # --- create: subnet -------------------------------------------------
        if not args.subnet_id and not subnet_exist():
            try:
                print('[CREATING AWS SUBNET]')
                args.subnet_id = create_subnet(args.vpc_id, args.subnet_cidr)
                if args.network_type == 'public':
                    enable_auto_assign_ip(args.subnet_id)
                print("[ASSOCIATING ROUTE TABLE WITH THE SUBNET]")
                rt = get_route_table_by_tag(args.service_base_name)
                route_table = ec2_resource.RouteTable(rt)
                route_table.associate_with_subnet(SubnetId=args.subnet_id)
                pre_defined_subnet = False
            except:
                # Roll back subnet, then any VPC this run created.
                try:
                    remove_subnet()
                except:
                    print("AWS Subnet hasn't been created or there is an error with removing it")
                if not pre_defined_vpc:
                    remove_internet_gateways(args.vpc_id, args.service_base_name)
                    remove_route_tables()
                    remove_vpc(args.vpc_id)
                sys.exit(1)
        if not args.subnet_id and subnet_exist():
            args.subnet_id = subnet_exist(True)
            pre_defined_subnet = False
        print('AWS Subnet ID: {}'.format(args.subnet_id))
        # --- create: security group ----------------------------------------
        # Ingress: HTTP/SSH/HTTPS/Nexus(8082)/Docker(8083)/Squid(3128) from the
        # allowed CIDRs, plus 8083/3128/80/443/8082 from inside the VPC.
        if not args.sg_id and not sg_exist():
            try:
                print('[CREATING AWS SECURITY GROUP]')
                allowed_ip_cidr = list()
                for cidr in args.allowed_ip_cidr.split(','):
                    allowed_ip_cidr.append({"CidrIp": cidr.replace(' ', '')})
                allowed_vpc_cidr_ip_ranges = list()
                for cidr in get_vpc_cidr_by_id(args.vpc_id):
                    allowed_vpc_cidr_ip_ranges.append({"CidrIp": cidr})
                ingress = format_sg([
                    {
                        "PrefixListIds": [],
                        "FromPort": 80,
                        "IpRanges": allowed_ip_cidr,
                        "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
                    },
                    {
                        "PrefixListIds": [],
                        "FromPort": 22,
                        "IpRanges": allowed_ip_cidr,
                        "ToPort": 22, "IpProtocol": "tcp", "UserIdGroupPairs": []
                    },
                    {
                        "PrefixListIds": [],
                        "FromPort": 443,
                        "IpRanges": allowed_ip_cidr,
                        "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
                    },
                    {
                        "PrefixListIds": [],
                        "FromPort": 8082,
                        "IpRanges": allowed_ip_cidr,
                        "ToPort": 8082, "IpProtocol": "tcp", "UserIdGroupPairs": []
                    },
                    {
                        "PrefixListIds": [],
                        "FromPort": 8083,
                        "IpRanges": allowed_ip_cidr,
                        "ToPort": 8083, "IpProtocol": "tcp", "UserIdGroupPairs": []
                    },
                    {
                        "PrefixListIds": [],
                        "FromPort": 8083,
                        "IpRanges": allowed_vpc_cidr_ip_ranges,
                        "ToPort": 8083, "IpProtocol": "tcp", "UserIdGroupPairs": []
                    },
                    {
                        "PrefixListIds": [],
                        "FromPort": 3128,
                        "IpRanges": allowed_ip_cidr,
                        "ToPort": 3128, "IpProtocol": "tcp", "UserIdGroupPairs": []
                    },
                    {
                        "PrefixListIds": [],
                        "FromPort": 3128,
                        "IpRanges": allowed_vpc_cidr_ip_ranges,
                        "ToPort": 3128, "IpProtocol": "tcp", "UserIdGroupPairs": []
                    },
                    {
                        "PrefixListIds": [],
                        "FromPort": 80,
                        "IpRanges": allowed_vpc_cidr_ip_ranges,
                        "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
                    },
                    {
                        "PrefixListIds": [],
                        "FromPort": 443,
                        "IpRanges": allowed_vpc_cidr_ip_ranges,
                        "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
                    },
                    {
                        "PrefixListIds": [],
                        "FromPort": 8082,
                        "IpRanges": allowed_vpc_cidr_ip_ranges,
                        "ToPort": 8082, "IpProtocol": "tcp", "UserIdGroupPairs": []
                    }
                ])
                # Egress: allow everything.
                egress = format_sg([
                    {"IpProtocol": "-1", "IpRanges": [{"CidrIp": '0.0.0.0/0'}], "UserIdGroupPairs": [],
                     "PrefixListIds": []}
                ])
                tag = {"Key": tag_name, "Value": args.service_base_name}
                name_tag = {"Key": "Name", "Value": args.service_base_name + "-sg"}
                args.sg_id = create_security_group(args.service_base_name + '-sg', args.vpc_id, ingress, egress, tag,
                                                   name_tag)
                pre_defined_sg = False
            except:
                # Roll back SG, then everything earlier this run created.
                try:
                    remove_sgroups()
                except:
                    print("AWS Security Group hasn't been created or there is an error with removing it")
                    pass
                if not pre_defined_subnet:
                    remove_subnet()
                if not pre_defined_vpc:
                    remove_internet_gateways(args.vpc_id, args.service_base_name)
                    remove_route_tables()
                    remove_vpc(args.vpc_id)
                sys.exit(1)
        if not args.sg_id and sg_exist():
            args.sg_id = sg_exist(True)
            pre_defined_sg = False
        print('AWS Security Group ID: {}'.format(args.sg_id))

        # --- create: EFS (optional) ----------------------------------------
        # NFS (2049/tcp) reachable from the allowed CIDRs and the VPC.
        if args.efs_enabled == 'True':
            if not args.efs_id and not efs_exist():
                try:
                    print('[CREATING AWS EFS]')
                    allowed_ip_cidr = list()
                    for cidr in args.allowed_ip_cidr.split(','):
                        allowed_ip_cidr.append({"CidrIp": cidr.replace(' ', '')})
                    allowed_vpc_cidr_ip_ranges = list()
                    for cidr in get_vpc_cidr_by_id(args.vpc_id):
                        allowed_vpc_cidr_ip_ranges.append({"CidrIp": cidr})
                    ingress = format_sg([
                        {
                            "PrefixListIds": [],
                            "FromPort": 2049,
                            "IpRanges": allowed_ip_cidr,
                            "ToPort": 2049, "IpProtocol": "tcp", "UserIdGroupPairs": []
                        },
                        {
                            "PrefixListIds": [],
                            "FromPort": 2049,
                            "IpRanges": allowed_vpc_cidr_ip_ranges,
                            "ToPort": 2049, "IpProtocol": "tcp", "UserIdGroupPairs": []
                        }
                    ])
                    egress = format_sg([
                        {"IpProtocol": "-1", "IpRanges": [{"CidrIp": '0.0.0.0/0'}], "UserIdGroupPairs": [],
                         "PrefixListIds": []}
                    ])
                    tag = {"Key": tag_name, "Value": args.service_base_name}
                    name_tag = {"Key": "Name", "Value": args.service_base_name + "-efs-sg"}
                    efs_sg_id = create_security_group(args.service_base_name + '-efs-sg', args.vpc_id, ingress, egress,
                                                      tag, name_tag)
                    args.efs_id = create_efs()
                    mount_target_id = create_mount_target(efs_sg_id)
                    pre_defined_efs = False
                    create_efs_tag()
                except:
                    # Roll back EFS, then everything earlier this run created.
                    try:
                        remove_efs()
                    except:
                        print("AWS EFS hasn't been created or there is an error with removing it")
                    if not pre_defined_sg:
                        remove_sgroups()
                    if not pre_defined_subnet:
                        remove_subnet()
                    if not pre_defined_vpc:
                        remove_internet_gateways(args.vpc_id, args.service_base_name)
                        remove_route_tables()
                        remove_vpc(args.vpc_id)
                    sys.exit(1)
            if not args.efs_id and efs_exist():
                args.efs_id = efs_exist(True)
                pre_defined_efs = False
            print('AWS EFS ID: {}'.format(args.efs_id))

        # --- create: EC2 instance ------------------------------------------
        if not ec2_exist():
            try:
                print('[CREATING AWS EC2 INSTANCE]')
                ec2_id = create_instance()
            except:
                try:
                    remove_ec2()
                except:
                    print("AWS EC2 instance hasn't been created or there is an error with removing it")
                if not pre_defined_efs:
                    remove_efs()
                if not pre_defined_sg:
                    remove_sgroups()
                if not pre_defined_subnet:
                    remove_subnet()
                if not pre_defined_vpc:
                    remove_internet_gateways(args.vpc_id, args.service_base_name)
                    remove_route_tables()
                    remove_vpc(args.vpc_id)
                sys.exit(1)
        else:
            ec2_id = ec2_exist(True)

        # --- create: elastic IP (public networks only) ---------------------
        if args.network_type == 'public':
            if not elastic_ip_exist():
                try:
                    print('[ALLOCATING AWS ELASTIC IP ADDRESS]')
                    allocate_elastic_ip()
                except:
                    try:
                        release_elastic_ip()
                    except:
                        print("AWS Elastic IP address hasn't been created or there is an error with removing it")
                    remove_ec2()
                    if not pre_defined_efs:
                        remove_efs()
                    if not pre_defined_sg:
                        remove_sgroups()
                    if not pre_defined_subnet:
                        remove_subnet()
                    if not pre_defined_vpc:
                        remove_internet_gateways(args.vpc_id, args.service_base_name)
                        remove_route_tables()
                        remove_vpc(args.vpc_id)
                    sys.exit(1)
            try:
                print('[ASSOCIATING AWS ELASTIC IP ADDRESS TO EC2 INSTANCE]')
                allocation_id = elastic_ip_exist(True)
                associate_elastic_ip(ec2_id, allocation_id)
                # Give AWS time to propagate the association before connecting.
                time.sleep(30)
            except:
                try:
                    association_id = elastic_ip_exist(True, 'AssociationId')
                    disassociate_elastic_ip(association_id)
                except:
                    print("AWS Elastic IP address hasn't been associated or there is an error with disassociating it")
                release_elastic_ip()
                remove_ec2()
                if not pre_defined_efs:
                    remove_efs()
                if not pre_defined_sg:
                    remove_sgroups()
                if not pre_defined_subnet:
                    remove_subnet()
                if not pre_defined_vpc:
                    remove_internet_gateways(args.vpc_id, args.service_base_name)
                    remove_route_tables()
                    remove_vpc(args.vpc_id)
                sys.exit(1)

        # Pick the address we will SSH to, depending on the network type.
        if args.network_type == 'public':
            ec2_ip_address = get_instance_ip_address_by_id(ec2_id, 'public_ip_address')
        else:
            ec2_ip_address = get_instance_ip_address_by_id(ec2_id, 'private_ip_address')

        # --- create: Route53 DNS record (optional) -------------------------
        if args.hosted_zone_id and args.hosted_zone_name and args.subdomain:
            try:
                print('[CREATING AWS ROUTE53 RECORD]')
                create_route_53_record(args.hosted_zone_id, args.hosted_zone_name, args.subdomain, ec2_ip_address)
            except:
                try:
                    remove_route_53_record(args.hosted_zone_id, args.hosted_zone_name, args.subdomain)
                except:
                    print("AWS Route53 record hasn't been created or there is an error with removing it")
                if args.network_type == 'public':
                    association_id = elastic_ip_exist(True, 'AssociationId')
                    disassociate_elastic_ip(association_id)
                    release_elastic_ip()
                remove_ec2()
                if not pre_defined_efs:
                    remove_efs()
                if not pre_defined_sg:
                    remove_sgroups()
                if not pre_defined_subnet:
                    remove_subnet()
                if not pre_defined_vpc:
                    remove_internet_gateways(args.vpc_id, args.service_base_name)
                    remove_route_tables()
                    remove_vpc(args.vpc_id)
                sys.exit(1)

        # --- provision the instance over SSH (fabric global env) -----------
        print("CONFIGURE CONNECTIONS")
        env['connection_attempts'] = 100
        env.key_filename = [args.key_path + args.key_name + '.pem']
        env.host_string = 'ubuntu@' + ec2_ip_address
        print("CONFIGURE LOCAL REPOSITORY")
        try:
            # Create the DLab OS user first, then reconnect as that user for
            # the remaining provisioning steps.
            print('CREATING DLAB-USER')
            ensure_ssh_user('ubuntu')
            env.host_string = configuration['conf_os_user'] + '@' + ec2_ip_address

            print('SETTING HOSTNAME')
            set_hostname()

            print('INSTALLING JAVA')
            install_java()

            print('INSTALLING GROOVY')
            install_groovy()

            print('CONFIGURING SSL CERTS')
            configure_ssl()

            print('INSTALLING NEXUS')
            install_nexus()

            print('INSTALLING NGINX')
            install_nginx()

            print('DOWNLOADING REQUIRED PACKAGES')
            download_packages()

            print('INSTALLING DOCKER')
            install_docker()

            print('PREPARING DLAB DOCKER IMAGES')
            prepare_images()

            print('INSTALLING SQUID')
            install_squid()

            # Prefer the DNS name in the summary when Route53 was configured.
            if args.hosted_zone_id and args.hosted_zone_name and args.subdomain:
                nexus_host = "{0}.{1}".format(args.subdomain, args.hosted_zone_name)
            else:
                nexus_host = ec2_ip_address

            # --- summary of everything that was created/configured ---------
            print('[SUMMARY]')
            print("AWS VPC ID: {0}".format(args.vpc_id))
            print("AWS Subnet ID: {0}".format(args.subnet_id))
            print("AWS Security Group ID: {0}".format(args.sg_id))
            print("AWS EC2 ID: {0}".format(ec2_id))
            print("AWS EC2 IP address: {0}".format(ec2_ip_address))
            print("SSL certificate path: /etc/ssl/certs/repository.crt")
            print("Service user credentials: {0}/{1}".format(args.nexus_service_user_name,
                                                             args.nexus_service_user_password))
            print("PyPi repository URL: https://{0}/repository/pypi".format(nexus_host))
            print("Maven-central repository URL: https://{0}/repository/maven-central".format(nexus_host))
            print("Maven-bintray repository URL: https://{0}/repository/maven-bintray".format(nexus_host))
            print("Docker-internal repository URL: {0}:8083".format(nexus_host))
            print("Docker repository URL: https://{0}/repository/docker".format(nexus_host))
            print("Jenkins repository URL: https://{0}/repository/jenkins".format(nexus_host))
            print("Mongo repository URL: https://{0}/repository/mongo".format(nexus_host))
            print("Packages repository URL: https://{0}/repository/packages".format(nexus_host))
            print("NPM repository URL: https://{0}/repository/npm".format(nexus_host))
            print("Ubuntu repository URL: https://{0}/repository/ubuntu".format(nexus_host))
            print("Ubuntu-security repository URL: https://{0}/repository/ubuntu-security".format(nexus_host))
            print("Ubuntu-bintray repository URL: https://{0}/repository/ubuntu-bintray".format(nexus_host))
            print("Ubuntu-canonical repository URL: https://{0}/repository/ubuntu-canonical".format(nexus_host))
            print("Rrutter repository URL: https://{0}/repository/rrutter".format(nexus_host))
            print("R repository URL: https://{0}/repository/r".format(nexus_host))
            print("Amazon-main repository URL: https://{0}/repository/amazon-main".format(nexus_host))
            print("Amazon-updates repository URL: https://{0}/repository/amazon-updates".format(nexus_host))
            print("Squid proxy: {0}:3128".format(nexus_host))
            if args.efs_id:
                print('AWS EFS ID: {}'.format(args.efs_id))
            if args.hosted_zone_id and args.hosted_zone_name and args.subdomain:
                print("DNS name: {0}".format(args.subdomain + '.' + args.hosted_zone_name))
        except:
            # Provisioning failed: undo everything this run created, in
            # reverse order (DNS -> IP -> EC2 -> EFS -> SG -> subnet -> VPC).
            if args.hosted_zone_id and args.hosted_zone_name and args.subdomain:
                remove_route_53_record(args.hosted_zone_id, args.hosted_zone_name, args.subdomain)
            if args.network_type == 'public':
                association_id = elastic_ip_exist(True, 'AssociationId')
                disassociate_elastic_ip(association_id)
                release_elastic_ip()
            remove_ec2()
            if not pre_defined_efs:
                remove_efs()
            if not pre_defined_sg:
                remove_sgroups()
            if not pre_defined_subnet:
                remove_subnet()
            if not pre_defined_vpc:
                remove_internet_gateways(args.vpc_id, args.service_base_name)
                remove_route_tables()
                remove_vpc(args.vpc_id)
            sys.exit(1)

    else:
        print('Invalid action: {}'.format(args.action))
diff --git a/infrastructure-provisioning/scripts/deploy_repository/files/Dockerfile b/infrastructure-provisioning/scripts/deploy_repository/files/Dockerfile
new file mode 100644
index 0000000..48f22da
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_repository/files/Dockerfile
@@ -0,0 +1,29 @@
+# *****************************************************************************
+#
+# Copyright (c) 2019, EPAM SYSTEMS INC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ******************************************************************************
+
# Base image for the DLab "pre-base" image pushed to the local Nexus registry.
FROM ubuntu:16.04

# Install any .deb dependencies
RUN	apt-get update && \
    apt-get -y upgrade && \
    apt-get -y install python-pip python-dev groff vim less git wget nano libssl-dev libffi-dev libffi6 && \
    apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# Install any python dependencies
# pip is pinned to 9.0.3 (last version supporting this legacy toolchain setup).
RUN pip install -UI pip==9.0.3
RUN pip install boto3 backoff fabric==1.14.0 fabvenv awscli argparse ujson jupyter pycrypto
\ No newline at end of file
diff --git a/infrastructure-provisioning/scripts/deploy_repository/files/mount-efs-sequentially.service b/infrastructure-provisioning/scripts/deploy_repository/files/mount-efs-sequentially.service
new file mode 100644
index 0000000..7bc54d5
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_repository/files/mount-efs-sequentially.service
@@ -0,0 +1,29 @@
+# *****************************************************************************
+#
+# Copyright (c) 2019, EPAM SYSTEMS INC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ******************************************************************************
+
[Unit]
Description=Mounting EFS file systems sequentially at boot time
# Run only after network file systems are available.
After=remote-fs.target

[Service]
# One-shot: mount all fstab entries of type "efs" once at boot;
# RemainAfterExit keeps the unit reported as active afterwards.
Type=oneshot
ExecStart=/bin/mount -avt efs
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
diff --git a/infrastructure-provisioning/scripts/deploy_repository/files/nexus.properties b/infrastructure-provisioning/scripts/deploy_repository/files/nexus.properties
new file mode 100644
index 0000000..0dc042e
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_repository/files/nexus.properties
@@ -0,0 +1,20 @@
+# *****************************************************************************
+#
+# Copyright (c) 2019, EPAM SYSTEMS INC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ******************************************************************************
+
# Serve Nexus over HTTPS on 8443 and load the jetty-https connector config
# in addition to the default Jetty configuration files.
application-port-ssl=8443
nexus-args=${jetty.etc}/jetty.xml,${jetty.etc}/jetty-http.xml,${jetty.etc}/jetty-requestlog.xml,${jetty.etc}/jetty-https.xml
diff --git a/infrastructure-provisioning/scripts/deploy_repository/scripts/addUpdateScript.groovy b/infrastructure-provisioning/scripts/deploy_repository/scripts/addUpdateScript.groovy
new file mode 100644
index 0000000..b45a057
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_repository/scripts/addUpdateScript.groovy
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2019, EPAM SYSTEMS INC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+@Grab('org.sonatype.nexus:nexus-rest-client:3.9.0-01')
+@Grab('org.sonatype.nexus:nexus-rest-jackson2:3.9.0-01')
+@Grab('org.sonatype.nexus:nexus-script:3.9.0-01')
+@Grab('org.codehaus.groovy:groovy-backports-compat23:2.4.5')
+@Grab('org.jboss.spec.javax.servlet:jboss-servlet-api_3.1_spec:1.0.0.Final')
+@Grab('com.fasterxml.jackson.core:jackson-core:2.8.6')
+@Grab('com.fasterxml.jackson.core:jackson-databind:2.8.6')
+@Grab('com.fasterxml.jackson.core:jackson-annotations:2.8.6')
+@Grab('com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.8.6')
+@Grab('org.jboss.spec.javax.ws.rs:jboss-jaxrs-api_2.0_spec:1.0.1.Beta1')
+@Grab('org.jboss.spec.javax.annotation:jboss-annotations-api_1.2_spec:1.0.0.Final')
+@Grab('javax.activation:activation:1.1.1')
+@Grab('net.jcip:jcip-annotations:1.0')
+@Grab('org.jboss.logging:jboss-logging-annotations:2.0.1.Final')
+@Grab('org.jboss.logging:jboss-logging-processor:2.0.1.Final')
+@Grab('com.sun.xml.bind:jaxb-impl:2.2.7')
+@Grab('com.sun.mail:javax.mail:1.5.6')
+@Grab('org.apache.james:apache-mime4j:0.6')
+@GrabExclude('org.codehaus.groovy:groovy-all')
+import javax.ws.rs.NotFoundException
+
+import org.sonatype.nexus.script.ScriptClient
+import org.sonatype.nexus.script.ScriptXO
+
+import org.jboss.resteasy.client.jaxrs.BasicAuthentication
+import org.jboss.resteasy.client.jaxrs.ResteasyClientBuilder
+
+// Upload (or update in place) a Groovy script into the Nexus 3 Script API via
+// its REST client: -u/-p credentials, -f local script file, optional -n name
+// and -h base URL.
+CliBuilder cli = new CliBuilder(
+    usage: 'groovy addUpdateScript.groovy -u admin -p admin123 -f scriptFile.groovy [-n explicitName] [-h nx3Url]')
+cli.with {
+  u longOpt: 'username', args: 1, required: true, 'A User with permission to use the NX3 Script resource'
+  p longOpt: 'password', args: 1, required: true, 'Password for given User'
+  f longOpt: 'file', args: 1, required: true, 'Script file to send to NX3'
+  h longOpt: 'host', args: 1, 'NX3 host url (including port if necessary). Defaults to http://localhost:8081'
+  n longOpt: 'name', args: 1, 'Name to store Script file under. Defaults to the name of the Script file.'
+}
+def options = cli.parse(args)
+if (!options) {
+  // CliBuilder already printed usage/errors for missing required options.
+  return
+}
+
+def file = new File(options.f)
+assert file.exists()
+
+def host = options.h ?: 'http://localhost:8081'
+def resource = 'service/rest'
+
+// JAX-RS proxy over the Nexus Script resource, authenticated with HTTP Basic.
+ScriptClient scripts = new ResteasyClientBuilder()
+    .build()
+    .register(new BasicAuthentication(options.u, options.p))
+    .target("$host/$resource")
+    .proxy(ScriptClient)
+
+String name = options.n ?: file.name
+
+// Look to see if a script with this name already exists so we can update if necessary
+boolean newScript = true
+try {
+  scripts.read(name)
+  newScript = false
+  println "Existing Script named '$name' will be updated"
+}
+catch (NotFoundException e) {
+  // 404 from the Script resource means no script of that name exists yet.
+  println "Script named '$name' will be created"
+}
+
+// ScriptXO(name, content, type): 'groovy' is the only supported script type.
+def script = new ScriptXO(name, file.text, 'groovy')
+if (newScript) {
+  scripts.add(script)
+}
+else {
+  scripts.edit(name, script)
+}
+
+println "Stored scripts are now: ${scripts.browse().collect { it.name }}"
diff --git a/infrastructure-provisioning/scripts/deploy_repository/scripts/update_amazon_repositories.py b/infrastructure-provisioning/scripts/deploy_repository/scripts/update_amazon_repositories.py
new file mode 100644
index 0000000..df8e20f
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_repository/scripts/update_amazon_repositories.py
@@ -0,0 +1,53 @@
+#!/usr/bin/python
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+
+from fabric.api import *
+import argparse
+
+
+# Refresh the 'amazon-main' / 'amazon-updates' raw proxy repositories in a
+# local Nexus so they point at the current Amazon Linux mirrors for --region.
+parser = argparse.ArgumentParser()
+parser.add_argument('--region', required=True, type=str, default='', help='AWS region name')
+args = parser.parse_args()
+
+
+if __name__ == "__main__":
+    # Placeholder token; presumably substituted with the real admin password
+    # when this script is rendered by the deployment tooling -- TODO confirm.
+    nexus_password = 'NEXUS_PASSWORD'
+    # Fetch the region-specific mirror lists published by Amazon Linux.
+    local('wget http://repo.{}.amazonaws.com/2017.09/main/mirror.list -O /tmp/main_mirror.list'.format(args.region))
+    local('wget http://repo.{}.amazonaws.com/2017.09/updates/mirror.list -O /tmp/updates_mirror.list'.format(
+        args.region))
+    # Pick the mirror line for this region and strip the literal '$basearch'
+    # suffix so the URL can serve as a generic raw-proxy remote.
+    amazon_main_repo = local("cat /tmp/main_mirror.list  | grep {} | sed 's/$basearch//g'".format(args.region),
+                             capture=True)
+    amazon_updates_repo = local("cat /tmp/updates_mirror.list  | grep {} | sed 's/$basearch//g'".format(args.region),
+                                capture=True)
+    # Render the Groovy template: substitute the resolved mirror URLs in a
+    # scratch copy, leaving /opt/nexus/updateRepositories.groovy untouched.
+    local('cp -f /opt/nexus/updateRepositories.groovy /tmp/updateRepositories.groovy')
+    local('sed -i "s|AMAZON_MAIN_URL|{}|g" /tmp/updateRepositories.groovy'.format(amazon_main_repo))
+    local('sed -i "s|AMAZON_UPDATES_URL|{}|g" /tmp/updateRepositories.groovy'.format(amazon_updates_repo))
+    # Upload the rendered script into Nexus' Script API, then invoke it.
+    local('/usr/local/groovy/latest/bin/groovy /tmp/addUpdateScript.groovy -u "admin" -p "{}" '
+          '-n "updateRepositories" -f "/tmp/updateRepositories.groovy" -h "http://localhost:8081"'.format(
+           nexus_password))
+    local('curl -u admin:{} -X POST --header \'Content-Type: text/plain\' '
+          'http://localhost:8081/service/rest/v1/script/updateRepositories/run'.format(nexus_password))
+    # Clean up temporary artifacts.
+    local('rm -f /tmp/main_mirror.list')
+    local('rm -f /tmp/updates_mirror.list')
+    local('rm -f /tmp/updateRepositories.groovy')
+    print('Amazon repositories have been successfully updated!')
diff --git a/infrastructure-provisioning/scripts/deploy_repository/templates/addCustomRepository.groovy b/infrastructure-provisioning/scripts/deploy_repository/templates/addCustomRepository.groovy
new file mode 100644
index 0000000..6325a65
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_repository/templates/addCustomRepository.groovy
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2019, EPAM SYSTEMS INC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.sonatype.nexus.repository.config.Configuration
+import org.sonatype.nexus.blobstore.api.BlobStoreManager
+import org.sonatype.nexus.repository.storage.WritePolicy
+
+
+// Create the custom proxy repositories DLab nodes consume: five apt proxies
+// (Ubuntu xenial mirrors and PPAs) plus a CRAN proxy for R packages. All use
+// the shared 'packages_store' blob store, no negative-cache expiry tuning
+// beyond the 1440-minute TTL, and zero content/metadata max-age (always
+// revalidate against the remote).
+// NOTE(review): 'REGION' in the Ubuntu remoteUrl is presumably a placeholder
+// substituted by the deployment script -- confirm before running standalone.
+
+// Regional Ubuntu archive mirror (main pool).
+ubuntuProxyConfiguration = new Configuration(
+        repositoryName: "ubuntu",
+        recipeName: "apt-proxy",
+        online: true,
+        attributes: [
+                storage: [
+                        blobStoreName              : 'packages_store',
+                        writePolicy                : WritePolicy.ALLOW,
+                        strictContentTypeValidation: true
+                ] as Map,
+                apt: [
+                        distribution : 'xenial',
+                        flat : false
+                ] as Map,
+                httpclient   : [
+                        connection: [
+                                blocked  : false,
+                                autoBlock: true
+                        ] as Map
+                ] as Map,
+                proxy: [
+                        remoteUrl: 'http://REGION.ec2.archive.ubuntu.com/ubuntu/',
+                        contentMaxAge: 0,
+                        metaDataMaxAge: 0
+                ] as Map,
+                negativeCache: [
+                        enabled   : true,
+                        timeToLive: 1440
+                ] as Map,
+        ] as Map
+)
+// Ubuntu security updates archive.
+securityProxyConfiguration = new Configuration(
+        repositoryName: "ubuntu-security",
+        recipeName: "apt-proxy",
+        online: true,
+        attributes: [
+                storage: [
+                        blobStoreName              : 'packages_store',
+                        writePolicy                : WritePolicy.ALLOW,
+                        strictContentTypeValidation: true
+                ] as Map,
+                apt: [
+                        distribution : 'xenial',
+                        flat : false
+                ] as Map,
+                httpclient   : [
+                        connection: [
+                                blocked  : false,
+                                autoBlock: true
+                        ] as Map
+                ] as Map,
+                proxy: [
+                        remoteUrl: 'http://security.ubuntu.com/ubuntu',
+                        contentMaxAge: 0,
+                        metaDataMaxAge: 0
+                ] as Map,
+                negativeCache: [
+                        enabled   : true,
+                        timeToLive: 1440
+                ] as Map,
+        ] as Map
+)
+// sbt Debian packages hosted on Bintray.
+BintrayDebianProxyConfiguration = new Configuration(
+        repositoryName: "ubuntu-bintray",
+        recipeName: "apt-proxy",
+        online: true,
+        attributes: [
+                storage: [
+                        blobStoreName              : 'packages_store',
+                        writePolicy                : WritePolicy.ALLOW,
+                        strictContentTypeValidation: true
+                ] as Map,
+                apt: [
+                        distribution : 'xenial',
+                        flat : false
+                ] as Map,
+                httpclient   : [
+                        connection: [
+                                blocked  : false,
+                                autoBlock: true
+                        ] as Map
+                ] as Map,
+                proxy: [
+                        remoteUrl: 'https://dl.bintray.com/sbt/debian',
+                        contentMaxAge: 0,
+                        metaDataMaxAge: 0
+                ] as Map,
+                negativeCache: [
+                        enabled   : true,
+                        timeToLive: 1440
+                ] as Map,
+        ] as Map
+)
+// marutter/rrutter Launchpad PPA (R builds for Ubuntu).
+RrutterDebianProxyConfiguration = new Configuration(
+        repositoryName: "rrutter",
+        recipeName: "apt-proxy",
+        online: true,
+        attributes: [
+                storage: [
+                        blobStoreName              : 'packages_store',
+                        writePolicy                : WritePolicy.ALLOW,
+                        strictContentTypeValidation: true
+                ] as Map,
+                apt: [
+                        distribution : 'xenial',
+                        flat : false
+                ] as Map,
+                httpclient   : [
+                        connection: [
+                                blocked  : false,
+                                autoBlock: true
+                        ] as Map
+                ] as Map,
+                proxy: [
+                        remoteUrl: 'http://ppa.launchpad.net/marutter/rrutter/ubuntu',
+                        contentMaxAge: 0,
+                        metaDataMaxAge: 0
+                ] as Map,
+                negativeCache: [
+                        enabled   : true,
+                        timeToLive: 1440
+                ] as Map,
+        ] as Map
+)
+// Canonical partner archive.
+CanonicalDebianProxyConfiguration = new Configuration(
+        repositoryName: "ubuntu-canonical",
+        recipeName: "apt-proxy",
+        online: true,
+        attributes: [
+                storage: [
+                        blobStoreName              : 'packages_store',
+                        writePolicy                : WritePolicy.ALLOW,
+                        strictContentTypeValidation: true
+                ] as Map,
+                apt: [
+                        distribution : 'xenial',
+                        flat : false
+                ] as Map,
+                httpclient   : [
+                        connection: [
+                                blocked  : false,
+                                autoBlock: true
+                        ] as Map
+                ] as Map,
+                proxy: [
+                        remoteUrl: 'http://archive.canonical.com/ubuntu',
+                        contentMaxAge: 0,
+                        metaDataMaxAge: 0
+                ] as Map,
+                negativeCache: [
+                        enabled   : true,
+                        timeToLive: 1440
+                ] as Map,
+        ] as Map
+)
+// CRAN mirror proxy for R packages (r-proxy recipe, no apt section).
+RProxyConfiguration = new Configuration(
+        repositoryName: "r",
+        recipeName: "r-proxy",
+        online: true,
+        attributes: [
+                storage: [
+                        blobStoreName              : 'packages_store',
+                        writePolicy                : WritePolicy.ALLOW,
+                        strictContentTypeValidation: true
+                ] as Map,
+                httpclient   : [
+                        connection: [
+                                blocked  : false,
+                                autoBlock: true
+                        ] as Map
+                ] as Map,
+                proxy: [
+                        remoteUrl: 'http://cran.us.r-project.org',
+                        contentMaxAge: 0,
+                        metaDataMaxAge: 0
+                ] as Map,
+                negativeCache: [
+                        enabled   : true,
+                        timeToLive: 1440
+                ] as Map,
+        ] as Map
+)
+// Register all of the above with the repository manager ('repository' is the
+// binding Nexus injects into Script API scripts).
+repository.createRepository(RProxyConfiguration)
+repository.createRepository(ubuntuProxyConfiguration)
+repository.createRepository(securityProxyConfiguration)
+repository.createRepository(BintrayDebianProxyConfiguration)
+repository.createRepository(RrutterDebianProxyConfiguration)
+repository.createRepository(CanonicalDebianProxyConfiguration)
diff --git a/infrastructure-provisioning/scripts/deploy_repository/templates/configureNexus.groovy b/infrastructure-provisioning/scripts/deploy_repository/templates/configureNexus.groovy
new file mode 100644
index 0000000..5c94123
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_repository/templates/configureNexus.groovy
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2019, EPAM SYSTEMS INC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.sonatype.nexus.repository.storage.WritePolicy
+import org.sonatype.nexus.repository.maven.VersionPolicy
+import org.sonatype.nexus.repository.maven.LayoutPolicy
+import org.sonatype.nexus.common.entity.*
+import org.sonatype.nexus.security.*
+import org.sonatype.nexus.security.authz.*
+import org.sonatype.nexus.ldap.persist.*
+import org.sonatype.nexus.ldap.persist.entity.*
+import org.sonatype.nexus.scheduling.TaskScheduler
+import org.sonatype.nexus.scheduling.schedule.Daily
+import org.sonatype.nexus.scheduling.schedule.Hourly
+import org.sonatype.nexus.security.realm.RealmManager
+
+// Initial Nexus 3 provisioning: drop the stock repositories, create DLab's
+// blob stores and proxy/hosted repositories, add a service role/user, change
+// the admin password and disable anonymous access.
+// Tokens in ALL_CAPS (AMAZON_MAIN_URL, SERVICE_USER_NAME, ADMIN_PASSWORD, ...)
+// are presumably placeholders substituted by the deployment script -- confirm
+// before running this file as-is.
+def securitySystem = container.lookup(SecuritySystem.class.name)
+def authorizationManager = securitySystem.getAuthorizationManager('default')
+// NOTE(review): 'manager' (LdapConfigurationManager) is looked up but never
+// used in this script -- possibly left over for a later LDAP step; verify.
+def manager = container.lookup(LdapConfigurationManager.class.name)
+
+//Removing default repositories
+repository.getRepositoryManager().delete('maven-central');
+repository.getRepositoryManager().delete('maven-public');
+repository.getRepositoryManager().delete('maven-releases');
+repository.getRepositoryManager().delete('maven-snapshots');
+repository.getRepositoryManager().delete('nuget-group');
+repository.getRepositoryManager().delete('nuget-hosted');
+repository.getRepositoryManager().delete('nuget.org-proxy');
+
+//Creating custom repositories
+// Three file blob stores partition artifacts, OS/package caches and Docker
+// layers onto separate storage paths.
+blobStore.createFileBlobStore('artifacts_store', 'artifacts_store')
+blobStore.createFileBlobStore('packages_store', 'packages_store')
+blobStore.createFileBlobStore('docker_store', 'docker_store')
+repository.createPyPiProxy('pypi','https://pypi.org/', 'packages_store', true)
+repository.createMavenProxy('maven-central','https://repo1.maven.org/maven2/', 'artifacts_store', true, VersionPolicy.RELEASE, LayoutPolicy.PERMISSIVE)
+repository.createMavenProxy('maven-bintray','https://dl.bintray.com/michaelklishin/maven/', 'artifacts_store', true, VersionPolicy.RELEASE, LayoutPolicy.PERMISSIVE)
+// Hosted Docker registry exposed on its own HTTP port 8083.
+repository.createDockerHosted('docker-internal', null, 8083, 'docker_store', true, true)
+repository.createRawProxy('docker','https://download.docker.com/linux/ubuntu', 'packages_store')
+repository.createRawProxy('jenkins','http://pkg.jenkins.io/debian-stable', 'packages_store')
+repository.createRawProxy('mongo','http://repo.mongodb.org/apt/ubuntu', 'packages_store')
+repository.createRawHosted('packages', 'packages_store')
+repository.createNpmProxy('npm', 'https://registry.npmjs.org', 'packages_store')
+// Amazon Linux mirrors; URLs are refreshed later by updateRepositories.groovy.
+repository.createRawProxy('amazon-main','AMAZON_MAIN_URL', 'packages_store')
+repository.createRawProxy('amazon-updates','AMAZON_UPDATES_URL', 'packages_store')
+
+// create a role for service user
+// Grants full view-level access to every repository, but no admin rights.
+def role = new org.sonatype.nexus.security.role.Role(
+    roleId: "nx-dlab",
+    source: "Nexus",
+    name: "nx-dlab",
+    description: null,
+    readOnly: false,
+    privileges: [ 'nx-repository-view-*-*-*' ],
+    roles: []
+)
+authorizationManager.addRole(role)
+
+// add a service user account
+security.addUser("SERVICE_USER_NAME",
+      "DLab", "Nexus",
+      "dlab-nexus@example.org", true,
+      "SERVICE_USER_PASSWORD", [ role.roleId ])
+
+// Rotate the default admin password and lock down anonymous access.
+security.securitySystem.changePassword('admin','ADMIN_PASSWORD')
+security.setAnonymousAccess(false)
+// Enable npm Bearer-token auth so 'npm login' works against the npm proxy.
+realmManager = container.lookup(RealmManager.class.getName())
+realmManager.enableRealm("NpmToken", true)
+log.info('Script completed successfully')
diff --git a/infrastructure-provisioning/scripts/deploy_repository/templates/jetty-https.xml b/infrastructure-provisioning/scripts/deploy_repository/templates/jetty-https.xml
new file mode 100644
index 0000000..6c080b7
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_repository/templates/jetty-https.xml
@@ -0,0 +1,98 @@
+<?xml version="1.0"?>
+<!--
+
+Copyright (c) 2019, EPAM SYSTEMS INC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+-->
+<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure_9_0.dtd">
+<Configure id="Server" class="org.eclipse.jetty.server.Server">
+
+  <!--
+  ==== HTTPS ====
+  Set the following inside nexus.properties:
+  application-port-ssl: the port to listen for https connections
+  -->
+
+  <!-- Point the plain-HTTP config at the HTTPS scheme/port for redirects. -->
+  <Ref refid="httpConfig">
+    <Set name="secureScheme">https</Set>
+    <Set name="securePort"><Property name="application-port-ssl" /></Set>
+  </Ref>
+
+  <New id="httpsConfig" class="org.eclipse.jetty.server.HttpConfiguration">
+    <Arg><Ref refid="httpConfig"/></Arg>
+    <Call name="addCustomizer">
+      <Arg><New class="org.eclipse.jetty.server.SecureRequestCustomizer"/></Arg>
+    </Call>
+  </New>
+
+  <!-- KEYSTORE_PASSWORD is presumably a placeholder substituted during
+       deployment; the same keystore.jks doubles as key and trust store. -->
+  <New id="sslContextFactory" class="org.eclipse.jetty.util.ssl.SslContextFactory">
+    <Set name="KeyStorePath"><Property name="ssl.etc"/>/keystore.jks</Set>
+    <Set name="KeyStorePassword">KEYSTORE_PASSWORD</Set>
+    <Set name="KeyManagerPassword">KEYSTORE_PASSWORD</Set>
+    <Set name="TrustStorePath"><Property name="ssl.etc"/>/keystore.jks</Set>
+    <Set name="TrustStorePassword">KEYSTORE_PASSWORD</Set>
+    <Set name="EndpointIdentificationAlgorithm"></Set>
+    <Set name="NeedClientAuth"><Property name="jetty.ssl.needClientAuth" default="false"/></Set>
+    <Set name="WantClientAuth"><Property name="jetty.ssl.wantClientAuth" default="false"/></Set>
+    <!-- Exclude known-weak export/DES cipher suites. -->
+    <Set name="ExcludeCipherSuites">
+      <Array type="String">
+        <Item>SSL_RSA_WITH_DES_CBC_SHA</Item>
+        <Item>SSL_DHE_RSA_WITH_DES_CBC_SHA</Item>
+        <Item>SSL_DHE_DSS_WITH_DES_CBC_SHA</Item>
+        <Item>SSL_RSA_EXPORT_WITH_RC4_40_MD5</Item>
+        <Item>SSL_RSA_EXPORT_WITH_DES40_CBC_SHA</Item>
+        <Item>SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA</Item>
+        <Item>SSL_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA</Item>
+      </Array>
+    </Set>
+  </New>
+
+  <!-- TLS connector: SSL factory wrapping HTTP/1.1, instrumented for Nexus
+       metrics, bound to application-host:application-port-ssl. -->
+  <Call  name="addConnector">
+    <Arg>
+      <New id="httpsConnector" class="org.eclipse.jetty.server.ServerConnector">
+        <Arg name="server"><Ref refid="Server" /></Arg>
+        <Arg name="acceptors" type="int"><Property name="jetty.https.acceptors" default="-1"/></Arg>
+        <Arg name="selectors" type="int"><Property name="jetty.https.selectors" default="-1"/></Arg>
+        <Arg name="factories">
+          <Array type="org.eclipse.jetty.server.ConnectionFactory">
+            <Item>
+              <New class="org.sonatype.nexus.bootstrap.jetty.InstrumentedConnectionFactory">
+                <Arg>
+                  <New class="org.eclipse.jetty.server.SslConnectionFactory">
+                    <Arg name="next">http/1.1</Arg>
+                    <Arg name="sslContextFactory"><Ref refid="sslContextFactory"/></Arg>
+                  </New>
+                </Arg>
+              </New>
+            </Item>
+            <Item>
+              <New class="org.eclipse.jetty.server.HttpConnectionFactory">
+                <Arg name="config"><Ref refid="httpsConfig" /></Arg>
+              </New>
+            </Item>
+          </Array>
+        </Arg>
+
+        <Set name="host"><Property name="application-host" /></Set>
+        <Set name="port"><Property name="application-port-ssl" /></Set>
+        <Set name="idleTimeout"><Property name="jetty.https.timeout" default="30000"/></Set>
+        <Set name="soLingerTime"><Property name="jetty.https.soLingerTime" default="-1"/></Set>
+        <Set name="acceptorPriorityDelta"><Property name="jetty.https.acceptorPriorityDelta" default="0"/></Set>
+        <Set name="acceptQueueSize"><Property name="jetty.https.acceptQueueSize" default="0"/></Set>
+      </New>
+    </Arg>
+  </Call>
+
+</Configure>
diff --git a/infrastructure-provisioning/scripts/deploy_repository/templates/nexus.conf b/infrastructure-provisioning/scripts/deploy_repository/templates/nexus.conf
new file mode 100644
index 0000000..327f66d
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_repository/templates/nexus.conf
@@ -0,0 +1,76 @@
+# *****************************************************************************
+#
+# Copyright (c) 2019, EPAM SYSTEMS INC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ******************************************************************************
+
+# Front-end nginx for the repository host. SUBDOMAIN.HOSTZONE is presumably a
+# placeholder substituted at deployment time -- confirm against the deployer.
+# Port 80: pass-through proxies for selected AWS endpoints (so isolated nodes
+# can reach IAM/EMR/Pricing through this host); everything else -> HTTPS.
+server {
+    listen 80;
+    server_name SUBDOMAIN.HOSTZONE;
+
+    location /iam {
+        proxy_pass https://iam.amazonaws.com;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+        client_max_body_size 500M;
+    }
+    location /emr {
+        proxy_pass https://REGION.elasticmapreduce.amazonaws.com;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+        client_max_body_size 500M;
+    }
+    location /pricing {
+        proxy_pass https://api.pricing.us-east-1.amazonaws.com;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+        client_max_body_size 500M;
+    }
+    # Redirect everything else to HTTPS.
+    location / {
+        return 301 https://$host$request_uri;
+    }
+}
+
+# Port 443: TLS termination in front of the Nexus UI/API on localhost:8081.
+server {
+    listen 443 ssl http2;
+    server_name SUBDOMAIN.HOSTZONE;
+
+    ssl_certificate  /etc/ssl/certs/repository.crt;
+    ssl_certificate_key  /etc/ssl/certs/repository.key;
+    ssl_session_timeout 1d;
+    ssl_session_cache shared:SSL:50m;
+    ssl_session_tickets off;
+    # NOTE(review): TLSv1/TLSv1.1 are legacy protocols; consider restricting
+    # to TLSv1.2+ unless old clients must be supported.
+    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
+    ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA25 [...]
+    ssl_prefer_server_ciphers on;
+    ssl_dhparam /etc/ssl/certs/dhparam.pem;
+
+    charset utf-8;
+
+    # Large client_max_body_size allows big artifact/package uploads to Nexus.
+    location / {
+        proxy_pass http://localhost:8081;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+        client_max_body_size 500M;
+    }
+}
diff --git a/infrastructure-provisioning/scripts/deploy_repository/templates/nexus.service b/infrastructure-provisioning/scripts/deploy_repository/templates/nexus.service
new file mode 100644
index 0000000..65a573c
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_repository/templates/nexus.service
@@ -0,0 +1,32 @@
+# *****************************************************************************
+#
+# Copyright (c) 2019, EPAM SYSTEMS INC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ******************************************************************************
+
+[Unit]
+Description=nexus service
+# EFS_SERVICE is presumably a placeholder for the EFS mount unit name,
+# substituted during deployment, so Nexus starts only after storage is up.
+After=network.target EFS_SERVICE
+
+[Service]
+# The nexus launcher daemonizes itself, hence Type=forking; the raised
+# file-descriptor limit matches Nexus' documented requirement.
+Type=forking
+LimitNOFILE=65536
+ExecStart=/opt/nexus/bin/nexus start
+ExecStop=/opt/nexus/bin/nexus stop
+User=nexus
+Restart=on-abort
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/infrastructure-provisioning/scripts/deploy_repository/templates/squid.conf b/infrastructure-provisioning/scripts/deploy_repository/templates/squid.conf
new file mode 100644
index 0000000..dd3b0a7
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_repository/templates/squid.conf
@@ -0,0 +1,55 @@
+# *****************************************************************************
+#
+# Copyright (c) 2019, EPAM SYSTEMS INC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ******************************************************************************
+
+# Forward proxy restricted to the deployment's own networks. VPC_CIDRS and
+# ALLOWED_CIDRS are presumably whole-line placeholders expanded into 'acl ...
+# src ...' definitions by the deployment script.
+# NOTE(review): the http_access rule below references acls named
+# 'AllowedCIDRS' and 'AWS_VPC_CIDR' -- confirm the placeholder expansion
+# actually defines acls with exactly those names.
+VPC_CIDRS
+ALLOWED_CIDRS
+acl maven dstdomain search.maven.org
+
+acl SSL_ports port 443
+acl Safe_ports port 80          # http
+acl Safe_ports port 21          # ftp
+acl Safe_ports port 22          # ssh
+acl Safe_ports port 443         # https
+acl Safe_ports port 70          # gopher
+acl Safe_ports port 210         # wais
+acl Safe_ports port 1025-65535  # unregistered ports
+acl Safe_ports port 280         # http-mgmt
+acl Safe_ports port 488         # gss-http
+acl Safe_ports port 591         # filemaker
+acl Safe_ports port 777         # multiling http
+acl CONNECT method CONNECT
+
+# Access policy: CONNECT only to SSL ports, manager only from localhost,
+# general access only from the allowed CIDRs, plus search.maven.org.
+http_access allow SSL_ports CONNECT
+http_access deny !Safe_ports
+http_access allow localhost manager
+http_access deny manager
+http_access allow AllowedCIDRS AWS_VPC_CIDR
+http_access allow maven
+http_access allow localhost
+http_access deny all
+
+http_port 3128
+
+# Pure pass-through proxy: disable caching entirely.
+cache deny all
+cache_dir null /tmp
+coredump_dir /var/spool/squid
+
+refresh_pattern ^ftp:           1440    20%     10080
+refresh_pattern ^gopher:        1440    0%      1440
+refresh_pattern -i (/cgi-bin/|\?) 0     0%      0
+refresh_pattern .               0       20%     4320
diff --git a/infrastructure-provisioning/scripts/deploy_repository/templates/updateRepositories.groovy b/infrastructure-provisioning/scripts/deploy_repository/templates/updateRepositories.groovy
new file mode 100644
index 0000000..3bab507
--- /dev/null
+++ b/infrastructure-provisioning/scripts/deploy_repository/templates/updateRepositories.groovy
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2019, EPAM SYSTEMS INC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.sonatype.nexus.common.entity.*
+
+// Recreate the Amazon Linux raw proxies with fresh mirror URLs. The
+// AMAZON_*_URL tokens are substituted by update_amazon_repositories.py before
+// this script is uploaded to the Nexus Script API; delete-then-create is used
+// because the raw-proxy helper has no in-place URL update.
+repository.getRepositoryManager().delete('amazon-main');
+repository.getRepositoryManager().delete('amazon-updates');
+
+repository.createRawProxy('amazon-main','AMAZON_MAIN_URL', 'packages_store')
+repository.createRawProxy('amazon-updates','AMAZON_UPDATES_URL', 'packages_store')
+log.info('Script completed successfully')


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@dlab.apache.org
For additional commands, e-mail: commits-help@dlab.apache.org