You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@dlab.apache.org by dm...@apache.org on 2019/10/31 09:30:25 UTC

[incubator-dlab] branch DLAB-1195 created (now 57b812a)

This is an automated email from the ASF dual-hosted git repository.

dmysakovets pushed a change to branch DLAB-1195
in repository https://gitbox.apache.org/repos/asf/incubator-dlab.git.


      at 57b812a  [DLAB-1195] Fixed dataengine creation from notebook AMI[GCP]

This branch includes the following new commits:

     new 57b812a  [DLAB-1195] Fixed dataengine creation from notebook AMI[GCP]

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@dlab.apache.org
For additional commands, e-mail: commits-help@dlab.apache.org


[incubator-dlab] 01/01: [DLAB-1195] Fixed dataengine creation from notebook AMI[GCP]

Posted by dm...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

dmysakovets pushed a commit to branch DLAB-1195
in repository https://gitbox.apache.org/repos/asf/incubator-dlab.git

commit 57b812a44d6970cb32bf4dbfcf7f18593c8b9712
Author: Demyan Mysakovets <de...@gmail.com>
AuthorDate: Thu Oct 31 11:30:10 2019 +0200

    [DLAB-1195] Fixed dataengine creation from notebook AMI[GCP]
---
 .../src/general/scripts/gcp/dataengine_prepare.py  | 37 ++++++++++++++--------
 1 file changed, 24 insertions(+), 13 deletions(-)

diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py
index aea75c1..d0cf7ea 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py
@@ -99,9 +99,9 @@ if __name__ == "__main__":
     data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
     data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
     data_engine['notebook_name'] = os.environ['notebook_instance_name']
-    data_engine['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
 
     data_engine['primary_disk_size'] = '30'
+    data_engine['secondary_disk_size'] = os.environ['notebook_disk_size']
 
     data_engine['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
     if data_engine['shared_image_enabled'] == 'false':
@@ -127,6 +127,13 @@ if __name__ == "__main__":
         data_engine['primary_image_name'] = 'global/images/{}'.format(
             data_engine['primary_image_name'].get('name'))
 
+    data_engine['secondary_image_name'] = GCPMeta().get_image_by_name(data_engine['expected_secondary_image_name'])
+    if data_engine['secondary_image_name'] == '':
+        data_engine['secondary_image_name'] = 'None'
+    else:
+        print('Pre-configured secondary image found. Using: {}'.format(data_engine['secondary_image_name'].get('name')))
+        data_engine['secondary_image_name'] = 'global/images/{}'.format(data_engine['secondary_image_name'].get('name'))
+
     data_engine['gpu_accelerator_type'] = 'None'
     if os.environ['application'] in ('tensor', 'tensor-rstudio', 'deeplearning'):
         data_engine['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
@@ -152,15 +159,17 @@ if __name__ == "__main__":
     try:
         logging.info('[CREATE MASTER NODE]')
         print('[CREATE MASTER NODE]')
-        params = "--instance_name {} --region {} --zone {} --vpc_name {} --subnet_name {} --instance_size {} " \
-                 "--ssh_key_path {} --initial_user {} --service_account_name {} --image_name {} " \
-                 "--instance_class {} --primary_disk_size {} " \
-                 "--gpu_accelerator_type {} --network_tag {} --cluster_name {} --labels '{}'".\
+        params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} --instance_size {5} " \
+                 "--ssh_key_path {6} --initial_user {7} --service_account_name {8} --image_name {9} " \
+                 "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} --secondary_disk_size {13}  " \
+                 "--gpu_accelerator_type {14} --network_tag {15} --cluster_name {16} --labels '{17}'". \
             format(data_engine['master_node_name'], data_engine['region'], data_engine['zone'], data_engine['vpc_name'],
                    data_engine['subnet_name'], data_engine['master_size'], data_engine['ssh_key_path'], initial_user,
                    data_engine['dataengine_service_account_name'], data_engine['primary_image_name'],
-                   'dataengine', '30', data_engine['gpu_accelerator_type'],
-                   data_engine['network_tag'], data_engine['cluster_name'], json.dumps(data_engine['master_labels']))
+                   data_engine['secondary_image_name'], 'dataengine', data_engine['primary_disk_size'],
+                   data_engine['secondary_disk_size'], data_engine['gpu_accelerator_type'],
+                   data_engine['network_tag'], data_engine['cluster_name'],
+                   json.dumps(data_engine['master_labels']))
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
         except:
@@ -177,15 +186,17 @@ if __name__ == "__main__":
             logging.info('[CREATE SLAVE NODE {}]'.format(i + 1))
             print('[CREATE SLAVE NODE {}]'.format(i + 1))
             slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            params = "--instance_name {} --region {} --zone {} --vpc_name {} --subnet_name {} --instance_size {} " \
-                     "--ssh_key_path {} --initial_user {} --service_account_name {} --image_name {} " \
-                     "--instance_class {} --primary_disk_size {} " \
-                     "--gpu_accelerator_type {} --network_tag {} --cluster_name {} --labels '{}'". \
+            params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} --instance_size {5} " \
+                     "--ssh_key_path {6} --initial_user {7} --service_account_name {8} --image_name {9} " \
+                     "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} --secondary_disk_size {13} " \
+                     "--gpu_accelerator_type {14} --network_tag {15} --cluster_name {16} --labels '{17}'". \
                 format(slave_name, data_engine['region'], data_engine['zone'],
                        data_engine['vpc_name'], data_engine['subnet_name'], data_engine['slave_size'],
                        data_engine['ssh_key_path'], initial_user, data_engine['dataengine_service_account_name'],
-                       data_engine['primary_image_name'], 'dataengine', '30',
-                       data_engine['gpu_accelerator_type'], data_engine['network_tag'], data_engine['cluster_name'],
+                       data_engine['primary_image_name'], data_engine['secondary_image_name'], 'dataengine',
+                       data_engine['primary_disk_size'],
+                       data_engine['secondary_disk_size'], data_engine['gpu_accelerator_type'],
+                       data_engine['network_tag'], data_engine['cluster_name'],
                        json.dumps(data_engine['slave_labels']))
             try:
                 local("~/scripts/{}.py {}".format('common_create_instance', params))


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@dlab.apache.org
For additional commands, e-mail: commits-help@dlab.apache.org