Posted to commits@dlab.apache.org by om...@apache.org on 2019/03/18 12:04:02 UTC

[incubator-dlab] 01/01: Fixed issue with S3 bucket tagging

This is an automated email from the ASF dual-hosted git repository.

omartushevskyi pushed a commit to branch DLAB-518
in repository https://gitbox.apache.org/repos/asf/incubator-dlab.git

commit dbbc91b69248d11284baf2e8aab38f4669b9cf0f
Author: Oleh Martushevskyi <Ol...@epam.com>
AuthorDate: Mon Mar 18 14:03:50 2019 +0200

    Fixed issue with S3 bucket tagging
---
 .../src/general/lib/aws/actions_lib.py             | 298 ++++++++++++++-------
 .../general/scripts/aws/common_create_bucket.py    |   3 +-
 .../src/general/scripts/aws/edge_prepare.py        |  59 ++--
 .../src/general/scripts/aws/ssn_configure.py       |  10 +-
 .../src/general/scripts/aws/ssn_prepare.py         |  38 ++-
 5 files changed, 262 insertions(+), 146 deletions(-)
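
The root cause visible in the diff below: S3 bucket names must be DNS-compliant, so DLab lowercases them and swaps underscores for hyphens, but the same sanitized string was also being written into the conf_tag_resource_id tag. The patch threads a separate bucket_name_tag through create_s3_bucket() so the tag keeps the original, un-sanitized name (see edge_prepare.py below). A minimal sketch of the split; the values are illustrative, not DLab defaults:

    service_base_name = 'dlab'      # assumed example value
    edge_user_name = 'Test_User'    # assumed example value

    # Human-readable name: goes into the resource-id tag as-is.
    bucket_name_tag = '{}-{}-bucket'.format(service_base_name, edge_user_name)
    # Actual S3 bucket name: must be lowercase and hyphenated.
    bucket_name = bucket_name_tag.lower().replace('_', '-')

    assert bucket_name_tag == 'dlab-Test_User-bucket'
    assert bucket_name == 'dlab-test-user-bucket'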

diff --git a/infrastructure-provisioning/src/general/lib/aws/actions_lib.py b/infrastructure-provisioning/src/general/lib/aws/actions_lib.py
index 2209bee..284c744 100644
--- a/infrastructure-provisioning/src/general/lib/aws/actions_lib.py
+++ b/infrastructure-provisioning/src/general/lib/aws/actions_lib.py
@@ -40,6 +40,7 @@ import dlab.fab
 import uuid
 import ast
 
+
 def backoff_log(err):
     logging.info("Unable to create Tag: " + \
                  str(err) + "\n Traceback: " + \
@@ -57,32 +58,35 @@ def put_to_bucket(bucket_name, local_file, destination_file):
             s3.upload_fileobj(data, bucket_name, destination_file, ExtraArgs={'ServerSideEncryption': 'AES256'})
         return True
     except Exception as err:
-        logging.info("Unable to upload files to S3 bucket: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to upload files to S3 bucket", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        logging.info("Unable to upload files to S3 bucket: " + str(err) + "\n Traceback: " + traceback.print_exc(
+            file=sys.stdout))
+        append_result(str({"error": "Unable to upload files to S3 bucket",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
         return False
 
 
-def create_s3_bucket(bucket_name, tag, region):
+def create_s3_bucket(bucket_name, tag, region, bucket_name_tag):
     try:
         s3 = boto3.resource('s3', config=Config(signature_version='s3v4'))
         if region == "us-east-1":
             bucket = s3.create_bucket(Bucket=bucket_name)
         else:
             bucket = s3.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region})
-        boto3.client('s3', config=Config(signature_version='s3v4')).put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration={
-            'Rules': [
-                {
-                    'ApplyServerSideEncryptionByDefault': {
-                        'SSEAlgorithm': 'AES256'
-                    }
-                },
-            ]
-        })
+        boto3.client('s3', config=Config(signature_version='s3v4')).put_bucket_encryption(
+            Bucket=bucket_name, ServerSideEncryptionConfiguration={
+                'Rules': [
+                    {
+                        'ApplyServerSideEncryptionByDefault': {
+                            'SSEAlgorithm': 'AES256'
+                        }
+                    },
+                ]
+            })
         tags = list()
         tags.append(tag)
-        tags.append({'Key': os.environ['conf_tag_resource_id'], 'Value': os.environ['conf_service_base_name'] + ':' +
-                                                                         bucket_name})
+        tags.append({'Key': os.environ['conf_tag_resource_id'],
+                     'Value': os.environ['conf_service_base_name'] + ':' + bucket_name_tag})
         tags.append({'Key': os.environ['conf_billing_tag_key'], 'Value': os.environ['conf_billing_tag_value']})
         if 'conf_additional_tags' in os.environ:
             for tag in os.environ['conf_additional_tags'].split(';'):
@@ -97,8 +101,10 @@ def create_s3_bucket(bucket_name, tag, region):
         tagging.reload()
         return bucket.name
     except Exception as err:
-        logging.info("Unable to create S3 bucket: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to create S3 bucket", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        logging.info("Unable to create S3 bucket: " + str(err) + "\n Traceback: " + traceback.print_exc(
+            file=sys.stdout))
+        append_result(str({"error": "Unable to create S3 bucket",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -110,7 +116,8 @@ def create_vpc(vpc_cidr, tag):
         return vpc.id
     except Exception as err:
         logging.info("Unable to create VPC: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to create VPC", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to create VPC",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -120,8 +127,10 @@ def enable_vpc_dns(vpc_id):
         client.modify_vpc_attribute(VpcId=vpc_id,
                                     EnableDnsHostnames={'Value': True})
     except Exception as err:
-        logging.info("Unable to modify VPC attributes: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to modify VPC attributes", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        logging.info("Unable to modify VPC attributes: " + str(err) + "\n Traceback: " + traceback.print_exc(
+            file=sys.stdout))
+        append_result(str({"error": "Unable to modify VPC attributes",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -132,7 +141,8 @@ def remove_vpc(vpc_id):
         print("VPC {} has been removed".format(vpc_id))
     except Exception as err:
         logging.info("Unable to remove VPC: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to remove VPC", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to remove VPC",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -202,7 +212,8 @@ def remove_emr_tag(emr_id, tag):
         emr.remove_tags(ResourceId=emr_id, TagKeys=tag)
     except Exception as err:
         logging.info("Unable to remove Tag: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to remove Tag", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to remove Tag",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -227,7 +238,8 @@ def create_rt(vpc_id, infra_tag_name, infra_tag_value, secondary):
         return rt_id
     except Exception as err:
         logging.info("Unable to create Route Table: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to create Route Table", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to create Route Table",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -240,7 +252,8 @@ def create_subnet(vpc_id, subnet, tag):
         return subnet.id
     except Exception as err:
         logging.info("Unable to create Subnet: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to create Subnet", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to create Subnet",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -251,7 +264,8 @@ def create_security_group(security_group_name, vpc_id, security_group_rules, egr
         time.sleep(10)
         create_tag(group.id, tag)
         try:
-            group.revoke_egress(IpPermissions=[{"IpProtocol": "-1", "IpRanges": [{"CidrIp": "0.0.0.0/0"}], "UserIdGroupPairs": [], "PrefixListIds": []}])
+            group.revoke_egress(IpPermissions=[{"IpProtocol": "-1", "IpRanges": [{"CidrIp": "0.0.0.0/0"}],
+                                                "UserIdGroupPairs": [], "PrefixListIds": []}])
         except:
             print("Mentioned rule does not exist")
         for rule in security_group_rules:
@@ -261,9 +275,11 @@ def create_security_group(security_group_name, vpc_id, security_group_rules, egr
         return group.id
     except Exception as err:
         logging.info("Unable to create security group: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to create security group", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to create security group",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
+
 def create_route_by_id(subnet_id, vpc_id, peering_id, another_cidr):
     client = boto3.client('ec2')
     try:
@@ -293,13 +309,16 @@ def create_route_by_id(subnet_id, vpc_id, peering_id, another_cidr):
                            "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
+
 def create_peer_routes(peering_id, service_base_name):
     client = boto3.client('ec2')
     try:
         route_tables = client.describe_route_tables(
-            Filters=[{'Name': 'tag:{}-Tag'.format(service_base_name), 'Values': ['{}'.format(service_base_name)]}]).get('RouteTables')
+            Filters=[{'Name': 'tag:{}-Tag'.format(service_base_name), 'Values': ['{}'.format(
+                service_base_name)]}]).get('RouteTables')
         route_tables2 = client.describe_route_tables(Filters=[
-            {'Name': 'tag:{}-secondary-Tag'.format(service_base_name), 'Values': ['{}'.format(service_base_name)]}]).get('RouteTables')
+            {'Name': 'tag:{}-secondary-Tag'.format(service_base_name), 'Values': ['{}'.format(
+                service_base_name)]}]).get('RouteTables')
         for table in route_tables:
             routes = table.get('Routes')
             routeExists=False
@@ -347,8 +366,10 @@ def create_peering_connection(vpc_id, vpc2_id, service_base_name):
         create_tag(peering.id, json.dumps(tag))
         return peering.id
     except Exception as err:
-        logging.info("Unable to create peering connection: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to create peering connection", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        logging.info("Unable to create peering connection: " + str(err) + "\n Traceback: " + traceback.print_exc(
+            file=sys.stdout))
+        append_result(str({"error": "Unable to create peering connection",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -358,7 +379,8 @@ def enable_auto_assign_ip(subnet_id):
         client.modify_subnet_attribute(MapPublicIpOnLaunch={'Value': True}, SubnetId=subnet_id)
     except Exception as err:
         logging.info("Unable to create Subnet: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to create Subnet", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to create Subnet",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -436,7 +458,8 @@ def create_instance(definitions, instance_tag, primary_disk_size=12):
         return ''
     except Exception as err:
         logging.info("Unable to create EC2: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to create EC2", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to create EC2",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 def tag_intance_volume(instance_id, node_name, instance_tag):
@@ -489,16 +512,25 @@ def create_iam_role(role_name, role_profile, region, service='ec2'):
     conn = boto3.client('iam')
     try:
         if region == 'cn-north-1':
-            conn.create_role(RoleName=role_name,
-                             AssumeRolePolicyDocument='{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":["' + service + '.amazonaws.com.cn"]},"Action":["sts:AssumeRole"]}]}')
+            conn.create_role(
+                RoleName=role_name,
+                AssumeRolePolicyDocument=
+                '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":["' + service +
+                '.amazonaws.com.cn"]},"Action":["sts:AssumeRole"]}]}')
         else:
-            conn.create_role(RoleName=role_name, AssumeRolePolicyDocument='{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":["' + service + '.amazonaws.com"]},"Action":["sts:AssumeRole"]}]}')
+            conn.create_role(
+                RoleName=role_name, AssumeRolePolicyDocument=
+                '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":["' + service +
+                '.amazonaws.com"]},"Action":["sts:AssumeRole"]}]}')
     except botocore.exceptions.ClientError as e_role:
         if e_role.response['Error']['Code'] == 'EntityAlreadyExists':
             print("IAM role already exists. Reusing...")
         else:
-            logging.info("Unable to create IAM role: " + str(e_role.response['Error']['Message']) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-            append_result(str({"error": "Unable to create IAM role", "error_message": str(e_role.response['Error']['Message']) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+            logging.info("Unable to create IAM role: " + str(e_role.response['Error']['Message']) +
+                         "\n Traceback: " + traceback.print_exc(file=sys.stdout))
+            append_result(str({"error": "Unable to create IAM role",
+                               "error_message": str(e_role.response['Error']['Message']) + "\n Traceback: " +
+                                                traceback.print_exc(file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
             return
     if service == 'ec2':
@@ -510,16 +542,22 @@ def create_iam_role(role_name, role_profile, region, service='ec2'):
             if e_profile.response['Error']['Code'] == 'EntityAlreadyExists':
                 print("Instance profile already exists. Reusing...")
             else:
-                logging.info("Unable to create Instance Profile: " + str(e_profile.response['Error']['Message']) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-                append_result(str({"error": "Unable to create Instance Profile", "error_message": str(e_profile.response['Error']['Message']) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+                logging.info("Unable to create Instance Profile: " + str(e_profile.response['Error']['Message']) +
+                             "\n Traceback: " + traceback.print_exc(file=sys.stdout))
+                append_result(str({"error": "Unable to create Instance Profile",
+                                   "error_message": str(e_profile.response['Error']['Message']) + "\n Traceback: " +
+                                                    traceback.print_exc(file=sys.stdout)}))
                 traceback.print_exc(file=sys.stdout)
                 return
         try:
             conn.add_role_to_instance_profile(InstanceProfileName=role_profile, RoleName=role_name)
             time.sleep(30)
         except botocore.exceptions.ClientError as err:
-            logging.info("Unable to add IAM role to instance profile: " + str(err.response['Error']['Message']) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-            append_result(str({"error": "Unable to add IAM role to instance profile", "error_message": str(err.response['Error']['Message']) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+            logging.info("Unable to add IAM role to instance profile: " + str(err.response['Error']['Message']) +
+                         "\n Traceback: " + traceback.print_exc(file=sys.stdout))
+            append_result(str({"error": "Unable to add IAM role to instance profile",
+                               "error_message": str(err.response['Error']['Message']) + "\n Traceback: " +
+                                                traceback.print_exc(file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
 
 
@@ -529,8 +567,11 @@ def attach_policy(role_name, policy_arn):
         conn.attach_role_policy(PolicyArn=policy_arn, RoleName=role_name)
         time.sleep(30)
     except botocore.exceptions.ClientError as err:
-        logging.info("Unable to attach Policy: " + str(err.response['Error']['Message']) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to attach Policy", "error_message": str(err.response['Error']['Message']) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        logging.info("Unable to attach Policy: " + str(err.response['Error']['Message']) + "\n Traceback: " +
+                     traceback.print_exc(file=sys.stdout))
+        append_result(str({"error": "Unable to attach Policy",
+                           "error_message": str(err.response['Error']['Message']) + "\n Traceback: " +
+                                            traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -542,7 +583,8 @@ def create_attach_policy(policy_name, role_name, file_path):
         conn.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=json_file)
     except Exception as err:
         logging.info("Unable to attach Policy: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to attach Policy", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to attach Policy",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -606,7 +648,8 @@ def remove_route_53_record(hosted_zone_id, hosted_zone_name, subdomain):
                                           )
         else:
             route53_client = boto3.client('route53')
-        for record_set in route53_client.list_resource_record_sets(HostedZoneId=hosted_zone_id).get('ResourceRecordSets'):
+        for record_set in route53_client.list_resource_record_sets(
+                HostedZoneId=hosted_zone_id).get('ResourceRecordSets'):
             if record_set['Name'] == "{}.{}.".format(subdomain, hosted_zone_name):
                 for record in record_set['ResourceRecords']:
                     route53_client.change_resource_record_sets(
@@ -725,7 +768,8 @@ def remove_ec2(tag_name, tag_value):
             print("There are no instances with '{}' tag to terminate".format(tag_name))
     except Exception as err:
         logging.info("Unable to remove EC2: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to EC2", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to EC2", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(
+            file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -749,7 +793,8 @@ def stop_ec2(tag_name, tag_value):
             print("There are no instances with {} name to stop".format(tag_value))
     except Exception as err:
         logging.info("Unable to stop EC2: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to stop EC2", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to stop EC2",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -773,7 +818,8 @@ def start_ec2(tag_name, tag_value):
             print("There are no instances with {} name to start".format(tag_value))
     except Exception as err:
         logging.info("Unable to start EC2: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to start EC2", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to start EC2",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -790,7 +836,8 @@ def remove_detach_iam_policies(role_name, action=''):
                 client.delete_policy(PolicyArn=policy_arn)
                 print("The IAM policy {} has been deleted successfully".format(policy_arn))
     except Exception as err:
-        logging.info("Unable to remove/detach IAM policy: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
+        logging.info("Unable to remove/detach IAM policy: " + str(err) + "\n Traceback: " + traceback.print_exc(
+            file=sys.stdout))
         append_result(str({"error": "Unable to remove/detach IAM policy",
                    "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
@@ -802,9 +849,11 @@ def remove_roles_and_profiles(role_name, role_profile_name):
         client.remove_role_from_instance_profile(InstanceProfileName=role_profile_name, RoleName=role_name)
         client.delete_instance_profile(InstanceProfileName=role_profile_name)
         client.delete_role(RoleName=role_name)
-        print("The IAM role {0} and instance profile {1} have been deleted successfully".format(role_name, role_profile_name))
+        print("The IAM role {0} and instance profile {1} have been deleted successfully".format(role_name,
+                                                                                                role_profile_name))
     except Exception as err:
-        logging.info("Unable to remove IAM role/profile: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
+        logging.info("Unable to remove IAM role/profile: " + str(err) + "\n Traceback: " + traceback.print_exc(
+            file=sys.stdout))
         append_result(str({"error": "Unable to remove IAM role/profile",
                    "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
@@ -839,7 +888,8 @@ def remove_all_iam_resources(instance_type, scientist=''):
                 if '-edge-Role' in iam_role:
                     if instance_type == 'edge' and scientist in iam_role:
                         remove_detach_iam_policies(iam_role, 'delete')
-                        role_profile_name = os.environ['conf_service_base_name'] + '-' + '{}'.format(scientist) + '-edge-Profile'
+                        role_profile_name = os.environ['conf_service_base_name'] + '-' + '{}'.format(scientist) + \
+                                            '-edge-Profile'
                         try:
                             client.get_instance_profile(InstanceProfileName=role_profile_name)
                             remove_roles_and_profiles(iam_role, role_profile_name)
@@ -849,7 +899,8 @@ def remove_all_iam_resources(instance_type, scientist=''):
                             print("The IAM role {} has been deleted successfully".format(iam_role))
                     if instance_type == 'all':
                         remove_detach_iam_policies(iam_role, 'delete')
-                        role_profile_name = client.list_instance_profiles_for_role(RoleName=iam_role).get('InstanceProfiles')
+                        role_profile_name = client.list_instance_profiles_for_role(
+                            RoleName=iam_role).get('InstanceProfiles')
                         if role_profile_name:
                             for i in role_profile_name:
                                 role_profile_name = i.get('InstanceProfileName')
@@ -861,7 +912,8 @@ def remove_all_iam_resources(instance_type, scientist=''):
                 if '-nb-de-Role' in iam_role:
                     if instance_type == 'notebook' and scientist in iam_role:
                         remove_detach_iam_policies(iam_role)
-                        role_profile_name = os.environ['conf_service_base_name'] + '-' + "{}".format(scientist) + '-nb-de-Profile'
+                        role_profile_name = os.environ['conf_service_base_name'] + '-' + "{}".format(scientist) + \
+                                            '-nb-de-Profile'
                         try:
                             client.get_instance_profile(InstanceProfileName=role_profile_name)
                             remove_roles_and_profiles(iam_role, role_profile_name)
@@ -871,7 +923,8 @@ def remove_all_iam_resources(instance_type, scientist=''):
                             print("The IAM role {} has been deleted successfully".format(iam_role))
                     if instance_type == 'all':
                         remove_detach_iam_policies(iam_role)
-                        role_profile_name = client.list_instance_profiles_for_role(RoleName=iam_role).get('InstanceProfiles')
+                        role_profile_name = client.list_instance_profiles_for_role(
+                            RoleName=iam_role).get('InstanceProfiles')
                         if role_profile_name:
                             for i in role_profile_name:
                                 role_profile_name = i.get('InstanceProfileName')
@@ -908,8 +961,10 @@ def remove_all_iam_resources(instance_type, scientist=''):
         else:
             print("There are no instance profiles to delete")
     except Exception as err:
-        logging.info("Unable to remove some of the IAM resources: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to remove some of the IAM resources", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        logging.info("Unable to remove some of the IAM resources: " + str(err) + "\n Traceback: " + traceback.print_exc(
+            file=sys.stdout))
+        append_result(str({"error": "Unable to remove some of the IAM resources",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -928,7 +983,8 @@ def s3_cleanup(bucket, cluster_name, user_name):
             s3_res.Object(resource.name, i.key).delete()
     except Exception as err:
         logging.info("Unable to clean S3 bucket: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to clean S3 bucket", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to clean S3 bucket",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -941,7 +997,8 @@ def remove_s3(bucket_type='all', scientist=''):
             bucket_name = (os.environ['conf_service_base_name'] + '-ssn-bucket').lower().replace('_', '-')
             bucket_list.append((os.environ['conf_service_base_name'] + '-shared-bucket').lower().replace('_', '-'))
         elif bucket_type == 'edge':
-            bucket_name = (os.environ['conf_service_base_name'] + '-' + "{}".format(scientist) + '-bucket').lower().replace('_', '-')
+            bucket_name = (os.environ['conf_service_base_name'] + '-' + "{}".format(scientist) +
+                           '-bucket').lower().replace('_', '-')
         else:
             bucket_name = (os.environ['conf_service_base_name']).lower().replace('_', '-')
         for item in client.list_buckets().get('Buckets'):
@@ -960,8 +1017,10 @@ def remove_s3(bucket_type='all', scientist=''):
             else:
                 print("There are no buckets to delete")
     except Exception as err:
-        logging.info("Unable to remove S3 bucket: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to remove S3 bucket", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        logging.info("Unable to remove S3 bucket: " + str(err) + "\n Traceback: " + traceback.print_exc(
+            file=sys.stdout))
+        append_result(str({"error": "Unable to remove S3 bucket",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -988,7 +1047,8 @@ def remove_subnets(tag_value):
             print("There are no private subnets to delete")
     except Exception as err:
         logging.info("Unable to remove subnet: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to remove subnet", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to remove subnet", "error_message": str(err) + "\n Traceback: " +
+                                                                                traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -997,8 +1057,11 @@ def remove_peering(tag_value):
         client = boto3.client('ec2')
         tag_name = os.environ['conf_service_base_name'] + '-Tag'
         if os.environ['conf_duo_vpc_enable']=='true':
-            peering_id = client.describe_vpc_peering_connections(Filters=[{'Name': 'tag-key', 'Values': [tag_name]}, {'Name': 'tag-value', 'Values': [tag_value]},
-                                                                   {'Name': 'status-code', 'Values': ['active']}]).get('VpcPeeringConnections')[0].get('VpcPeeringConnectionId')
+            peering_id = client.describe_vpc_peering_connections(Filters=[
+                {'Name': 'tag-key', 'Values': [tag_name]},
+                {'Name': 'tag-value', 'Values': [tag_value]},
+                {'Name': 'status-code', 'Values':
+                    ['active']}]).get('VpcPeeringConnections')[0].get('VpcPeeringConnectionId')
             if peering_id:
                 client.delete_vpc_peering_connection(VpcPeeringConnectionId=peering_id)
                 print("Peering connection {} has been deleted successfully".format(peering_id))
@@ -1007,8 +1070,10 @@ def remove_peering(tag_value):
         else:
             print("There are no peering connections to delete because duo vpc option is disabled")
     except Exception as err:
-        logging.info("Unable to remove peering connection: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to remove peering connection", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        logging.info("Unable to remove peering connection: " + str(err) + "\n Traceback: " + traceback.print_exc(
+            file=sys.stdout))
+        append_result(str({"error": "Unable to remove peering connection",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 def remove_sgroups(tag_value):
@@ -1026,7 +1091,8 @@ def remove_sgroups(tag_value):
             print("There are no security groups to delete")
     except Exception as err:
         logging.info("Unable to remove SG: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to remove SG", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to remove SG",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -1042,8 +1108,10 @@ def add_inbound_sg_rule(sg_id, rule):
             print("The following inbound rule is already exist:")
             print(str(rule))
         else:
-            logging.info("Unable to add inbound rule to SG: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-            append_result(str({"error": "Unable to add inbound rule to SG", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+            logging.info("Unable to add inbound rule to SG: " + str(err) + "\n Traceback: " + traceback.print_exc(
+                file=sys.stdout))
+            append_result(str({"error": "Unable to add inbound rule to SG",
+                               "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
 
 
@@ -1059,8 +1127,10 @@ def add_outbound_sg_rule(sg_id, rule):
             print("The following outbound rule is already exist:")
             print(str(rule))
         else:
-            logging.info("Unable to add outbound rule to SG: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-            append_result(str({"error": "Unable to add outbound rule to SG", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+            logging.info("Unable to add outbound rule to SG: " + str(err) + "\n Traceback: " + traceback.print_exc(
+                file=sys.stdout))
+            append_result(str({"error": "Unable to add outbound rule to SG",
+                               "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
 
 
@@ -1078,8 +1148,10 @@ def deregister_image(image_name='*'):
                     client.delete_snapshot(SnapshotId=device.get('Ebs').get('SnapshotId'))
             print("Notebook AMI {} has been deregistered successfully".format(image.id))
     except Exception as err:
-        logging.info("Unable to de-register image: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to de-register image", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        logging.info("Unable to de-register image: " + str(err) + "\n Traceback: " + traceback.print_exc(
+            file=sys.stdout))
+        append_result(str({"error": "Unable to de-register image",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -1093,7 +1165,8 @@ def terminate_emr(id):
         waiter.wait(ClusterId=id)
     except Exception as err:
         logging.info("Unable to remove EMR: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to remove EMR", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        append_result(str({"error": "Unable to remove EMR",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -1115,15 +1188,17 @@ def remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path, emr_ver
                 if exists('/home/{}/.ensure_dir/dataengine-service_{}_interpreter_ensured'.format(ssh_user, emr_name)):
                     if os.environ['notebook_multiple_clusters'] == 'true':
                         try:
-                            livy_port = sudo("cat /opt/" + emr_version + "/" + emr_name
-                                             + "/livy/conf/livy.conf | grep livy.server.port | tail -n 1 | awk '{printf $3}'")
+                            livy_port = sudo("cat /opt/" + emr_version + "/" + emr_name +
+                                             "/livy/conf/livy.conf | grep livy.server.port | tail -n 1 | "
+                                             "awk '{printf $3}'")
                             process_number = sudo("netstat -natp 2>/dev/null | grep ':" + livy_port +
                                                   "' | awk '{print $7}' | sed 's|/.*||g'")
                             sudo('kill -9 ' + process_number)
                             sudo('systemctl disable livy-server-' + livy_port)
                         except:
                             print("Wasn't able to find Livy server for this EMR!")
-                    sudo('sed -i \"s/^export SPARK_HOME.*/export SPARK_HOME=\/opt\/spark/\" /opt/zeppelin/conf/zeppelin-env.sh')
+                    sudo('sed -i \"s/^export SPARK_HOME.*/export SPARK_HOME=\/opt\/spark/\" '
+                         '/opt/zeppelin/conf/zeppelin-env.sh')
                     sudo("rm -rf /home/{}/.ensure_dir/dataengine-service_interpreter_ensure".format(ssh_user))
                     zeppelin_url = 'http://' + private + ':8080/api/interpreter/setting/'
                     opener = urllib2.build_opener(urllib2.ProxyHandler({}))
@@ -1151,7 +1226,8 @@ def remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path, emr_ver
                         if result == '1':
                             zeppelin_restarted = True
                     sudo('sleep 5')
-                    sudo('rm -rf /home/{}/.ensure_dir/dataengine-service_{}_interpreter_ensured'.format(ssh_user, emr_name))
+                    sudo('rm -rf /home/{}/.ensure_dir/dataengine-service_{}_interpreter_ensured'.format(ssh_user,
+                                                                                                        emr_name))
                 if exists('/home/{}/.ensure_dir/rstudio_dataengine-service_ensured'.format(ssh_user)):
                     dlab.fab.remove_rstudio_dataengines_kernel(computational_name, ssh_user)
                 sudo('rm -rf  /opt/' + emr_version + '/' + emr_name + '/')
@@ -1159,8 +1235,10 @@ def remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path, emr_ver
         else:
             print("There are no notebooks to clean kernels.")
     except Exception as err:
-        logging.info("Unable to remove kernels on Notebook: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-        append_result(str({"error": "Unable to remove kernels on Notebook", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
+        logging.info("Unable to remove kernels on Notebook: " + str(err) + "\n Traceback: " + traceback.print_exc(
+            file=sys.stdout))
+        append_result(str({"error": "Unable to remove kernels on Notebook",
+                           "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
         traceback.print_exc(file=sys.stdout)
 
 
@@ -1392,22 +1470,24 @@ def installing_python(region, bucket, user_name, cluster_name, application='', p
         if region == 'cn-north-1':
             try:
                 local(venv_command + ' && sudo -i ' + pip_command +
-                      ' install -i https://{0}/simple --trusted-host {0} --timeout 60000 -U pip==9.0.3 --no-cache-dir'.format(pip_mirror))
+                      ' install -i https://{0}/simple --trusted-host {0} --timeout 60000 -U pip==9.0.3 '
+                      '--no-cache-dir'.format(pip_mirror))
                 local(venv_command + ' && sudo -i ' + pip_command + ' install pyzmq==17.0.0')
                 local(venv_command + ' && sudo -i ' + pip_command +
-                      ' install -i https://{0}/simple --trusted-host {0} --timeout 60000 ipython ipykernel --no-cache-dir'.
-                      format(pip_mirror))
+                      ' install -i https://{0}/simple --trusted-host {0} --timeout 60000 ipython ipykernel '
+                      '--no-cache-dir'.format(pip_mirror))
                 local(venv_command + ' && sudo -i ' + pip_command +
-                      ' install -i https://{0}/simple --trusted-host {0} --timeout 60000 boto boto3 NumPy=={1} SciPy Matplotlib==2.0.2 pandas Sympy Pillow sklearn --no-cache-dir'.
-                      format(pip_mirror, numpy_version))
+                      ' install -i https://{0}/simple --trusted-host {0} --timeout 60000 boto boto3 NumPy=={1} SciPy '
+                      'Matplotlib==2.0.2 pandas Sympy Pillow sklearn --no-cache-dir'.format(pip_mirror, numpy_version))
                 # Need to refactor when we add GPU cluster
                 if application == 'deeplearning':
                     local(venv_command + ' && sudo -i ' + pip_command +
-                          ' install -i https://{0}/simple --trusted-host {0} --timeout 60000 mxnet-cu80 opencv-python keras Theano --no-cache-dir'.format(pip_mirror))
+                          ' install -i https://{0}/simple --trusted-host {0} --timeout 60000 mxnet-cu80 opencv-python '
+                          'keras Theano --no-cache-dir'.format(pip_mirror))
                     python_without_dots = python_version.replace('.', '')
                     local(venv_command + ' && sudo -i ' + pip_command +
-                          ' install  https://cntk.ai/PythonWheel/GPU/cntk-2.0rc3-cp{0}-cp{0}m-linux_x86_64.whl --no-cache-dir'.
-                          format(python_without_dots[:2]))
+                          ' install  https://cntk.ai/PythonWheel/GPU/cntk-2.0rc3-cp{0}-cp{0}m-linux_x86_64.whl '
+                          '--no-cache-dir'.format(python_without_dots[:2]))
                 local('sudo rm /etc/pip.conf')
                 local('sudo mv /etc/back_pip.conf /etc/pip.conf')
             except:
@@ -1420,15 +1500,16 @@ def installing_python(region, bucket, user_name, cluster_name, application='', p
             local(venv_command + ' && sudo -i ' + pip_command + ' install pyzmq==17.0.0')
             local(venv_command + ' && sudo -i ' + pip_command + ' install ipython ipykernel --no-cache-dir')
             local(venv_command + ' && sudo -i ' + pip_command +
-                  ' install boto boto3 NumPy=={} SciPy Matplotlib==2.0.2 pandas Sympy Pillow sklearn --no-cache-dir'.format(numpy_version))
+                  ' install boto boto3 NumPy=={} SciPy Matplotlib==2.0.2 pandas Sympy Pillow sklearn '
+                  '--no-cache-dir'.format(numpy_version))
             # Need to refactor when we add GPU cluster
             if application == 'deeplearning':
                 local(venv_command + ' && sudo -i ' + pip_command +
                       ' install mxnet-cu80 opencv-python keras Theano --no-cache-dir')
                 python_without_dots = python_version.replace('.', '')
                 local(venv_command + ' && sudo -i ' + pip_command +
-                      ' install  https://cntk.ai/PythonWheel/GPU/cntk-2.0rc3-cp{0}-cp{0}m-linux_x86_64.whl --no-cache-dir'.
-                      format(python_without_dots[:2]))
+                      ' install  https://cntk.ai/PythonWheel/GPU/cntk-2.0rc3-cp{0}-cp{0}m-linux_x86_64.whl '
+                      '--no-cache-dir'.format(python_without_dots[:2]))
         local('sudo rm -rf /usr/bin/python' + python_version[0:3])
         local('sudo ln -fs /opt/python/python' + python_version + '/bin/python' + python_version[0:3] +
               ' /usr/bin/python' + python_version[0:3])
@@ -1440,7 +1521,8 @@ def spark_defaults(args):
         local(""" sudo bash -c " sed -i '/""" + i + """/d' """ + spark_def_path + """ " """)
     local(""" sudo bash -c " sed -i '/#/d' """ + spark_def_path + """ " """)
     local(""" sudo bash -c " sed -i '/^\s*$/d' """ + spark_def_path + """ " """)
-    local(""" sudo bash -c "sed -i '/spark.driver.extraClassPath/,/spark.driver.extraLibraryPath/s|/usr|/opt/DATAENGINE-SERVICE_VERSION/jars/usr|g' """ + spark_def_path + """ " """)
+    local(""" sudo bash -c "sed -i '/spark.driver.extraClassPath/,/spark.driver.extraLibraryPath/s|"""
+          """/usr|/opt/DATAENGINE-SERVICE_VERSION/jars/usr|g' """ + spark_def_path + """ " """)
     local(""" sudo bash -c "sed -i '/spark.yarn.dist.files/s/\/etc\/spark\/conf/\/opt\/DATAENGINE-SERVICE_VERSION\/CLUSTER\/conf/g' """
           + spark_def_path + """ " """)
     template_file = spark_def_path
@@ -1456,7 +1538,8 @@ def spark_defaults(args):
         endpoint_url = "https://s3.{}.amazonaws.com.cn".format(args.region)
     else:
         endpoint_url = 'https://s3-' + args.region + '.amazonaws.com'
-    local("""bash -c 'echo "spark.hadoop.fs.s3a.endpoint    """ + endpoint_url + """ " >> """ + spark_def_path + """'""")
+    local("""bash -c 'echo "spark.hadoop.fs.s3a.endpoint    """ + endpoint_url + """ " >> """ +
+          spark_def_path + """'""")
     local('echo "spark.hadoop.fs.s3a.server-side-encryption-algorithm   AES256" >> {}'.format(spark_def_path))
 
 
@@ -1544,7 +1627,8 @@ def configure_zeppelin_emr_interpreter(emr_version, cluster_name, region, spark_
         python_version = python_version[0:5]
         livy_port = ''
         livy_path = '/opt/{0}/{1}/livy/'.format(emr_version, cluster_name)
-        spark_libs = "/opt/{0}/jars/usr/share/aws/aws-java-sdk/aws-java-sdk-core*.jar /opt/{0}/jars/usr/lib/hadoop/hadoop-aws*.jar /opt/" + \
+        spark_libs = "/opt/{0}/jars/usr/share/aws/aws-java-sdk/aws-java-sdk-core*.jar /opt/{0}/jars/usr/lib/hadoop" \
+                     "/hadoop-aws*.jar /opt/" + \
                      "{0}/jars/usr/share/aws/aws-java-sdk/aws-java-sdk-s3-*.jar /opt/{0}" + \
                      "/jars/usr/lib/hadoop-lzo/lib/hadoop-lzo-*.jar".format(emr_version)
         #fix due to: Multiple py4j files found under ..../spark/python/lib
@@ -1552,8 +1636,8 @@ def configure_zeppelin_emr_interpreter(emr_version, cluster_name, region, spark_
         local('rm /opt/{0}/{1}/spark/python/lib/py4j-src.zip'.format(emr_version, cluster_name))
 
         local('echo \"Configuring emr path for Zeppelin\"')
-        local('sed -i \"s/^export SPARK_HOME.*/export SPARK_HOME=\/opt\/{0}\/{1}\/spark/\" /opt/zeppelin/conf/zeppelin-env.sh'.
-              format(emr_version, cluster_name))
+        local('sed -i \"s/^export SPARK_HOME.*/export SPARK_HOME=\/opt\/{0}\/{1}\/spark/\" '
+              '/opt/zeppelin/conf/zeppelin-env.sh'.format(emr_version, cluster_name))
         local('sed -i "s/^export HADOOP_CONF_DIR.*/export HADOOP_CONF_DIR=' + \
               '\/opt\/{0}\/{1}\/conf/" /opt/{0}/{1}/spark/conf/spark-env.sh'.format(emr_version, cluster_name))
         local('echo \"spark.jars $(ls {0} | tr \'\\n\' \',\')\" >> /opt/{1}/{2}/spark/conf/spark-defaults.conf'
@@ -1590,7 +1674,8 @@ def configure_zeppelin_emr_interpreter(emr_version, cluster_name, region, spark_
                 local('sudo sed -i "s/^/#/g" {}conf/spark-blacklist.conf'.format(livy_path))
             local(''' sudo echo "export SPARK_HOME={0}" >> {1}conf/livy-env.sh'''.format(spark_dir, livy_path))
             local(''' sudo echo "export HADOOP_CONF_DIR={0}" >> {1}conf/livy-env.sh'''.format(yarn_dir, livy_path))
-            local(''' sudo echo "export PYSPARK3_PYTHON=python{0}" >> {1}conf/livy-env.sh'''.format(python_version[0:3], livy_path))
+            local(''' sudo echo "export PYSPARK3_PYTHON=python{0}" >> {1}conf/livy-env.sh'''.format(python_version[0:3],
+                                                                                                    livy_path))
             template_file = "/tmp/dataengine-service_interpreter.json"
             fr = open(template_file, 'r+')
             text = fr.read()
@@ -1657,9 +1742,12 @@ def configure_dataengine_spark(cluster_name, jars_dir, cluster_dir, datalake_ena
         endpoint_url = "https://s3.{}.amazonaws.com.cn".format(region)
     else:
         endpoint_url = 'https://s3-' + region + '.amazonaws.com'
-    local("""bash -c 'echo "spark.hadoop.fs.s3a.endpoint    """ + endpoint_url + """" >> /tmp/{}/notebook_spark-defaults_local.conf'""".format(cluster_name))
-    local('echo "spark.hadoop.fs.s3a.server-side-encryption-algorithm   AES256" >> /tmp/{}/notebook_spark-defaults_local.conf'.format(cluster_name))
-    local('cp -f /tmp/{0}/notebook_spark-defaults_local.conf  {1}spark/conf/spark-defaults.conf'.format(cluster_name, cluster_dir))
+    local("""bash -c 'echo "spark.hadoop.fs.s3a.endpoint    """ + endpoint_url +
+          """" >> /tmp/{}/notebook_spark-defaults_local.conf'""".format(cluster_name))
+    local('echo "spark.hadoop.fs.s3a.server-side-encryption-algorithm   AES256" >> '
+          '/tmp/{}/notebook_spark-defaults_local.conf'.format(cluster_name))
+    local('cp -f /tmp/{0}/notebook_spark-defaults_local.conf  {1}spark/conf/spark-defaults.conf'.format(cluster_name,
+                                                                                                        cluster_dir))
     if spark_configs:
         spark_configurations = ast.literal_eval(spark_configs)
         new_spark_defaults = list()
@@ -1769,10 +1857,14 @@ def ensure_local_spark(os_user, spark_link, spark_version, hadoop_version, local
             sys.exit(1)
 
 
-def install_dataengine_spark(cluster_name, spark_link, spark_version, hadoop_version, cluster_dir, os_user, datalake_enabled):
-    local('wget ' + spark_link + ' -O /tmp/' + cluster_name + '/spark-' + spark_version + '-bin-hadoop' + hadoop_version + '.tgz')
-    local('tar -zxvf /tmp/' + cluster_name + '/spark-' + spark_version + '-bin-hadoop' + hadoop_version + '.tgz -C /opt/' + cluster_name)
-    local('mv /opt/' + cluster_name + '/spark-' + spark_version + '-bin-hadoop' + hadoop_version + ' ' + cluster_dir + 'spark/')
+def install_dataengine_spark(cluster_name, spark_link, spark_version, hadoop_version, cluster_dir, os_user,
+                             datalake_enabled):
+    local('wget ' + spark_link + ' -O /tmp/' + cluster_name + '/spark-' + spark_version + '-bin-hadoop' +
+          hadoop_version + '.tgz')
+    local('tar -zxvf /tmp/' + cluster_name + '/spark-' + spark_version + '-bin-hadoop' + hadoop_version +
+          '.tgz -C /opt/' + cluster_name)
+    local('mv /opt/' + cluster_name + '/spark-' + spark_version + '-bin-hadoop' + hadoop_version + ' ' +
+          cluster_dir + 'spark/')
     local('chown -R ' + os_user + ':' + os_user + ' ' + cluster_dir + 'spark/')
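
Taken together, the patched create_s3_bucket() creates the bucket, enables default AES256 server-side encryption, and tags it with the un-sanitized name. A self-contained boto3 sketch of that flow, assuming the same calls as the diff; the tag key and base name below are illustrative, whereas DLab reads the real ones from os.environ:

    import boto3
    from botocore.client import Config

    def create_tagged_bucket(bucket_name, bucket_name_tag, region, base_name):
        s3 = boto3.resource('s3', config=Config(signature_version='s3v4'))
        if region == 'us-east-1':
            # us-east-1 rejects an explicit LocationConstraint.
            bucket = s3.create_bucket(Bucket=bucket_name)
        else:
            bucket = s3.create_bucket(
                Bucket=bucket_name,
                CreateBucketConfiguration={'LocationConstraint': region})
        # Default AES256 server-side encryption, as in the patch.
        boto3.client('s3', config=Config(signature_version='s3v4')).put_bucket_encryption(
            Bucket=bucket_name,
            ServerSideEncryptionConfiguration={'Rules': [
                {'ApplyServerSideEncryptionByDefault': {'SSEAlgorithm': 'AES256'}}]})
        # The resource-id tag now carries the un-sanitized name (the bug fix);
        # 'user:tag' is an illustrative key, not the DLab config value.
        tags = [{'Key': 'user:tag', 'Value': base_name + ':' + bucket_name_tag}]
        tagging = bucket.Tagging()
        tagging.put(Tagging={'TagSet': tags})
        tagging.reload()
        return bucket.name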
 
 
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_create_bucket.py b/infrastructure-provisioning/src/general/scripts/aws/common_create_bucket.py
index c14c8f4..9bce147 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_create_bucket.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_create_bucket.py
@@ -30,6 +30,7 @@ import sys
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--bucket_name', type=str, default='')
+parser.add_argument('--bucket_name_tag', type=str, default='')
 parser.add_argument('--infra_tag_name', type=str, default='')
 parser.add_argument('--infra_tag_value', type=str, default='')
 parser.add_argument('--region', type=str, default='')
@@ -43,7 +44,7 @@ if __name__ == "__main__":
             bucket = get_bucket_by_name(args.bucket_name)
             if bucket == '':
                 print("Creating bucket {0} with tag {1}.".format(args.bucket_name, json.dumps(tag)))
-                bucket = create_s3_bucket(args.bucket_name, tag, args.region)
+                bucket = create_s3_bucket(args.bucket_name, tag, args.region, args.bucket_name_tag)
             else:
                 print("REQUESTED BUCKET ALREADY EXISTS")
             print("BUCKET_NAME {}".format(bucket))
diff --git a/infrastructure-provisioning/src/general/scripts/aws/edge_prepare.py b/infrastructure-provisioning/src/general/scripts/aws/edge_prepare.py
index 9de94dc..71423cd 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/edge_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/edge_prepare.py
@@ -52,8 +52,9 @@ if __name__ == "__main__":
     edge_conf['sg_ids'] = os.environ['aws_security_groups_ids']
     edge_conf['instance_name'] = '{}-{}-edge'.format(edge_conf['service_base_name'], os.environ['edge_user_name'])
     edge_conf['tag_name'] = '{}-Tag'.format(edge_conf['service_base_name'])
-    edge_conf['bucket_name'] = '{}-{}-bucket'.format(edge_conf['service_base_name'],
-                                                     os.environ['edge_user_name']).lower().replace('_', '-')
+    edge_conf['bucket_name_tag'] = '{}-{}-bucket'.format(edge_conf['service_base_name'],
+                                                     os.environ['edge_user_name'])
+    edge_conf['bucket_name'] = edge_conf['bucket_name_tag'].lower().replace('_', '-')
     edge_conf['ssn_bucket_name'] = '{}-ssn-bucket'.format(edge_conf['service_base_name']).lower().replace('_', '-')
     edge_conf['shared_bucket_name'] = '{}-shared-bucket'.format(edge_conf['service_base_name']).lower().replace('_',
                                                                                                                 '-')
@@ -109,10 +110,11 @@ if __name__ == "__main__":
     try:
         logging.info('[CREATE SUBNET]')
         print('[CREATE SUBNET]')
-        params = "--vpc_id '{}' --infra_tag_name {} --infra_tag_value {} --username {} --prefix {} --user_subnets_range '{}'" \
-                 .format(edge_conf['vpc2_id'], edge_conf['tag_name'], edge_conf['service_base_name'],
-                         os.environ['edge_user_name'], edge_conf['private_subnet_prefix'],
-                         os.environ['conf_user_subnets_range'])
+        params = "--vpc_id '{}' --infra_tag_name {} --infra_tag_value {} --username {} --prefix {} " \
+                 "--user_subnets_range '{}'".format(edge_conf['vpc2_id'], edge_conf['tag_name'],
+                                                    edge_conf['service_base_name'], os.environ['edge_user_name'],
+                                                    edge_conf['private_subnet_prefix'],
+                                                    os.environ['conf_user_subnets_range'])
         try:
             local("~/scripts/{}.py {}".format('common_create_subnet', params))
         except:
@@ -380,9 +382,11 @@ if __name__ == "__main__":
             }
         ])
 
-        params = "--name {} --vpc_id {} --security_group_rules '{}' --egress '{}' --infra_tag_name {} --infra_tag_value {} --force {}".\
-            format(edge_conf['notebook_security_group_name'], edge_conf['vpc2_id'], json.dumps(private_sg_ingress),
-                   json.dumps(private_sg_egress), edge_conf['service_base_name'], edge_conf['notebook_instance_name'], True)
+        params = "--name {} --vpc_id {} --security_group_rules '{}' --egress '{}' --infra_tag_name {} " \
+                 "--infra_tag_value {} --force {}".format(edge_conf['notebook_security_group_name'],
+                                                          edge_conf['vpc2_id'], json.dumps(private_sg_ingress),
+                                                          json.dumps(private_sg_egress), edge_conf['service_base_name'],
+                                                          edge_conf['notebook_instance_name'], True)
         try:
             local("~/scripts/{}.py {}".format('common_create_security_group', params))
         except:
@@ -404,10 +408,11 @@ if __name__ == "__main__":
     logging.info('[CREATING SECURITY GROUPS FOR MASTER NODE]')
     print("[CREATING SECURITY GROUPS FOR MASTER NODE]")
     try:
-        params = "--name {} --vpc_id {} --security_group_rules '{}' --egress '{}' --infra_tag_name {} --infra_tag_value {} --force {}". \
-            format(edge_conf['dataengine_master_security_group_name'], edge_conf['vpc2_id'],
-                   json.dumps(private_sg_ingress), json.dumps(private_sg_egress), edge_conf['service_base_name'],
-                   edge_conf['dataengine_instances_name'], True)
+        params = "--name {} --vpc_id {} --security_group_rules '{}' --egress '{}' --infra_tag_name {} " \
+                 "--infra_tag_value {} --force {}".format(edge_conf['dataengine_master_security_group_name'],
+                                                          edge_conf['vpc2_id'], json.dumps(private_sg_ingress),
+                                                          json.dumps(private_sg_egress), edge_conf['service_base_name'],
+                                                          edge_conf['dataengine_instances_name'], True)
         try:
             local("~/scripts/{}.py {}".format('common_create_security_group', params))
         except:
@@ -425,10 +430,11 @@ if __name__ == "__main__":
     logging.info('[CREATING SECURITY GROUPS FOR SLAVE NODES]')
     print("[CREATING SECURITY GROUPS FOR SLAVE NODES]")
     try:
-        params = "--name {} --vpc_id {} --security_group_rules '{}' --egress '{}' --infra_tag_name {} --infra_tag_value {} --force {}". \
-            format(edge_conf['dataengine_slave_security_group_name'], edge_conf['vpc2_id'],
-                   json.dumps(private_sg_ingress), json.dumps(private_sg_egress), edge_conf['service_base_name'],
-                   edge_conf['dataengine_instances_name'], True)
+        params = "--name {} --vpc_id {} --security_group_rules '{}' --egress '{}' --infra_tag_name {} " \
+                 "--infra_tag_value {} --force {}".format(edge_conf['dataengine_slave_security_group_name'],
+                                                          edge_conf['vpc2_id'], json.dumps(private_sg_ingress),
+                                                          json.dumps(private_sg_egress), edge_conf['service_base_name'],
+                                                          edge_conf['dataengine_instances_name'], True)
         try:
             local("~/scripts/{}.py {}".format('common_create_security_group', params))
         except:
@@ -447,9 +453,9 @@ if __name__ == "__main__":
     try:
         logging.info('[CREATE BUCKETS]')
         print('[CREATE BUCKETS]')
-        params = "--bucket_name {} --infra_tag_name {} --infra_tag_value {} --region {}" \
+        params = "--bucket_name {} --infra_tag_name {} --infra_tag_value {} --region {} --bucket_name_tag {}" \
                  .format(edge_conf['bucket_name'], edge_conf['tag_name'], edge_conf['bucket_name'],
-                  edge_conf['region'])
+                         edge_conf['region'], edge_conf['bucket_name_tag'])
         try:
             local("~/scripts/{}.py {}".format('common_create_bucket', params))
         except:
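
Every new flag added here has to travel through a shell command: the params string is handed to the helper script via Fabric's local() and parsed again with argparse on the other side. A sketch of the round trip, assuming Fabric 1.x (values are hypothetical):

    from fabric.api import local  # Fabric 1.x, assumed to match these scripts

    params = '--bucket_name {} --bucket_name_tag {}'.format(
        'dlab-test-user-bucket', 'DLab_Test-user-bucket')
    # The helper script receives these flags through its own argparse parser.
    local('~/scripts/{}.py {}'.format('common_create_bucket', params))
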
@@ -468,10 +474,14 @@ if __name__ == "__main__":
     try:
         logging.info('[CREATING BUCKET POLICY FOR USER INSTANCES]')
         print('[CREATING BUCKET POLICY FOR USER INSTANCES]')
-        params = '--bucket_name {} --ssn_bucket_name {} --shared_bucket_name {} --username {} --edge_role_name {} --notebook_role_name {} --service_base_name {} --region {} --user_predefined_s3_policies "{}"'.format(
-            edge_conf['bucket_name'], edge_conf['ssn_bucket_name'], edge_conf['shared_bucket_name'],
-            os.environ['edge_user_name'], edge_conf['role_name'], edge_conf['notebook_dataengine_role_name'],
-            edge_conf['service_base_name'], edge_conf['region'], os.environ['aws_user_predefined_s3_policies'])
+        params = '--bucket_name {} --ssn_bucket_name {} --shared_bucket_name {} --username {} --edge_role_name {} ' \
+                 '--notebook_role_name {} --service_base_name {} --region {} ' \
+                 '--user_predefined_s3_policies "{}"'.format(edge_conf['bucket_name'], edge_conf['ssn_bucket_name'],
+                                                             edge_conf['shared_bucket_name'],
+                                                             os.environ['edge_user_name'], edge_conf['role_name'],
+                                                             edge_conf['notebook_dataengine_role_name'],
+                                                             edge_conf['service_base_name'], edge_conf['region'],
+                                                             os.environ['aws_user_predefined_s3_policies'])
         try:
             local("~/scripts/{}.py {}".format('common_create_policy', params))
         except:
@@ -532,7 +542,8 @@ if __name__ == "__main__":
             print('Error: {0}'.format(err))
             append_result("Failed to associate elastic ip.", str(err))
             try:
-                edge_conf['edge_public_ip'] = get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name']).get('Public')
+                edge_conf['edge_public_ip'] = get_instance_ip_address(edge_conf['tag_name'],
+                                                                      edge_conf['instance_name']).get('Public')
                 edge_conf['allocation_id'] = get_allocation_id_by_elastic_ip(edge_conf['edge_public_ip'])
             except:
                 print("No Elastic IPs to release!")
diff --git a/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py b/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
index 03ecdda..dcb4442 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
@@ -43,8 +43,10 @@ if __name__ == "__main__":
         role_name = service_base_name.lower().replace('-', '_') + '-ssn-Role'
         role_profile_name = service_base_name.lower().replace('-', '_') + '-ssn-Profile'
         policy_name = service_base_name.lower().replace('-', '_') + '-ssn-Policy'
-        user_bucket_name = (service_base_name + '-ssn-bucket').lower().replace('_', '-')
-        shared_bucket_name = (service_base_name + '-shared-bucket').lower().replace('_', '-')
+        ssn_bucket_name_tag = service_base_name + '-ssn-bucket'
+        shared_bucket_name_tag = service_base_name + '-shared-bucket'
+        ssn_bucket_name = ssn_bucket_name_tag.lower().replace('_', '-')
+        shared_bucket_name = shared_bucket_name_tag.lower().replace('_', '-')
         tag_name = service_base_name + '-Tag'
         tag2_name = service_base_name + '-secondary-Tag'
         instance_name = service_base_name + '-ssn'
@@ -391,7 +393,7 @@ if __name__ == "__main__":
         print("Security IDs: {}".format(os.environ['aws_security_groups_ids']))
         print("SSN instance shape: {}".format(os.environ['aws_ssn_instance_size']))
         print("SSN AMI name: {}".format(ssn_image_name))
-        print("SSN bucket name: {}".format(user_bucket_name))
+        print("SSN bucket name: {}".format(ssn_bucket_name))
         print("Shared bucket name: {}".format(shared_bucket_name))
         print("Region: {}".format(region))
         jenkins_url = "http://{}/jenkins".format(get_instance_hostname(tag_name, instance_name))
@@ -418,7 +420,7 @@ if __name__ == "__main__":
                    "subnet_id": os.environ['aws_subnet_id'],
                    "security_id": os.environ['aws_security_groups_ids'],
                    "instance_shape": os.environ['aws_ssn_instance_size'],
-                   "bucket_name": user_bucket_name,
+                   "bucket_name": ssn_bucket_name,
                    "shared_bucket_name": shared_bucket_name,
                    "region": region,
                    "action": "Create SSN instance"}
diff --git a/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py b/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
index 5712695..3fcda41 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
@@ -60,8 +60,10 @@ if __name__ == "__main__":
         role_name = service_base_name.lower().replace('-', '_') + '-ssn-Role'
         role_profile_name = service_base_name.lower().replace('-', '_') + '-ssn-Profile'
         policy_name = service_base_name.lower().replace('-', '_') + '-ssn-Policy'
-        user_bucket_name = (service_base_name + '-ssn-bucket').lower().replace('_', '-')
-        shared_bucket_name = (service_base_name + '-shared-bucket').lower().replace('_', '-')
+        ssn_bucket_name_tag = service_base_name + '-ssn-bucket'
+        shared_bucket_name_tag = service_base_name + '-shared-bucket'
+        ssn_bucket_name = ssn_bucket_name_tag.lower().replace('_', '-')
+        shared_bucket_name = shared_bucket_name_tag.lower().replace('_', '-')
         tag_name = service_base_name + '-Tag'
         tag2_name = service_base_name + '-secondary-Tag'
         instance_name = service_base_name + '-ssn'
@@ -86,7 +88,8 @@ if __name__ == "__main__":
                 pre_defined_vpc = True
                 logging.info('[CREATE VPC AND ROUTE TABLE]')
                 print('[CREATE VPC AND ROUTE TABLE]')
-                params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {}".format(vpc_cidr, region, tag_name, service_base_name)
+                params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {}".format(
+                    vpc_cidr, region, tag_name, service_base_name)
                 try:
                     local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
                 except:
@@ -111,7 +114,8 @@ if __name__ == "__main__":
                 pre_defined_vpc2 = True
                 logging.info('[CREATE SECONDARY VPC AND ROUTE TABLE]')
                 print('[CREATE SECONDARY VPC AND ROUTE TABLE]')
-                params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {} --secondary".format(vpc2_cidr, region, tag2_name, service_base_name)
+                params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {} --secondary".format(
+                    vpc2_cidr, region, tag2_name, service_base_name)
                 try:
                     local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
                 except:
@@ -136,7 +140,8 @@ if __name__ == "__main__":
                 pre_defined_subnet = True
                 logging.info('[CREATE SUBNET]')
                 print('[CREATE SUBNET]')
-                params = "--vpc_id {} --username {} --infra_tag_name {} --infra_tag_value {} --prefix {} --ssn {}".format(os.environ['aws_vpc_id'], 'ssn', tag_name, service_base_name, '20', True)
+                params = "--vpc_id {} --username {} --infra_tag_name {} --infra_tag_value {} --prefix {} " \
+                         "--ssn {}".format(os.environ['aws_vpc_id'], 'ssn', tag_name, service_base_name, '20', True)
                 try:
                     local("~/scripts/{}.py {}".format('common_create_subnet', params))
                 except:
@@ -169,9 +174,11 @@ if __name__ == "__main__":
             try:
                 logging.info('[CREATE PEERING CONNECTION]')
                 print('[CREATE PEERING CONNECTION]')
-                os.environ['aws_peering_id'] = create_peering_connection(os.environ['aws_vpc_id'], os.environ['aws_vpc2_id'], service_base_name)
+                os.environ['aws_peering_id'] = create_peering_connection(os.environ['aws_vpc_id'],
+                                                                         os.environ['aws_vpc2_id'], service_base_name)
                 print('PEERING CONNECTION ID:' + os.environ['aws_peering_id'])
-                create_route_by_id(os.environ['aws_subnet_id'], os.environ['aws_vpc_id'], os.environ['aws_peering_id'], get_cidr_by_vpc(os.environ['aws_vpc2_id']))
+                create_route_by_id(os.environ['aws_subnet_id'], os.environ['aws_vpc_id'], os.environ['aws_peering_id'],
+                                   get_cidr_by_vpc(os.environ['aws_vpc2_id']))
             except Exception as err:
                 print('Error: {0}'.format(err))
                 append_result("Failed to create peering connection.", str(err))
@@ -236,8 +243,10 @@ if __name__ == "__main__":
                 egress_sg_rules_template = format_sg([
                     {"IpProtocol": "-1", "IpRanges": [{"CidrIp": all_ip_cidr}], "UserIdGroupPairs": [], "PrefixListIds": []}
                 ])
-                params = "--name {} --vpc_id {} --security_group_rules '{}' --egress '{}' --infra_tag_name {} --infra_tag_value {} --force {} --ssn {}". \
-                    format(sg_name, os.environ['aws_vpc_id'], json.dumps(ingress_sg_rules_template), json.dumps(egress_sg_rules_template), service_base_name, tag_name, False, True)
+                params = "--name {} --vpc_id {} --security_group_rules '{}' --egress '{}' --infra_tag_name {} " \
+                         "--infra_tag_value {} --force {} --ssn {}". \
+                    format(sg_name, os.environ['aws_vpc_id'], json.dumps(ingress_sg_rules_template),
+                           json.dumps(egress_sg_rules_template), service_base_name, tag_name, False, True)
                 try:
                     local("~/scripts/{}.py {}".format('common_create_security_group', params))
                 except:
@@ -341,8 +350,8 @@ if __name__ == "__main__":
     try:
         logging.info('[CREATE BUCKETS]')
         print('[CREATE BUCKETS]')
-        params = "--bucket_name {} --infra_tag_name {} --infra_tag_value {} --region {}". \
-                 format(user_bucket_name, tag_name, user_bucket_name, region)
+        params = "--bucket_name {} --infra_tag_name {} --infra_tag_value {} --region {} --bucket_name_tag {}". \
+                 format(ssn_bucket_name, tag_name, ssn_bucket_name, region, ssn_bucket_name_tag)
 
         try:
             local("~/scripts/{}.py {}".format('common_create_bucket', params))
@@ -350,8 +359,8 @@ if __name__ == "__main__":
             traceback.print_exc()
             raise Exception
 
-        params = "--bucket_name {} --infra_tag_name {} --infra_tag_value {} --region {}". \
-                 format(shared_bucket_name, tag_name, shared_bucket_name, region)
+        params = "--bucket_name {} --infra_tag_name {} --infra_tag_value {} --region {} --bucket_name_tag {}". \
+                 format(shared_bucket_name, tag_name, shared_bucket_name, region, shared_bucket_name_tag)
 
         try:
             local("~/scripts/{}.py {}".format('common_create_bucket', params))
@@ -379,7 +388,8 @@ if __name__ == "__main__":
     try:
         logging.info('[CREATE SSN INSTANCE]')
         print('[CREATE SSN INSTANCE]')
-        params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {}".\
+        params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} " \
+                 "--iam_profile {} --infra_tag_name {} --infra_tag_value {}".\
             format(instance_name, ssn_ami_id, os.environ['aws_ssn_instance_size'], os.environ['conf_key_name'],
                    os.environ['aws_security_groups_ids'], os.environ['aws_subnet_id'],
                    role_profile_name, tag_name, instance_name)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@dlab.apache.org
For additional commands, e-mail: commits-help@dlab.apache.org