Posted to github@beam.apache.org by GitBox <gi...@apache.org> on 2022/03/01 20:59:50 UTC

[GitHub] [beam] KevinGG commented on a change in pull request #16904: [BEAM-13973] Link Dataproc Flink master URLs to the InteractiveRunner when FlinkRunner is used

KevinGG commented on a change in pull request #16904:
URL: https://github.com/apache/beam/pull/16904#discussion_r817135978



##########
File path: sdks/python/apache_beam/runners/interactive/dataproc/dataproc_cluster_manager.py
##########
@@ -186,15 +210,108 @@ def describe(self) -> None:
     """Returns a dictionary describing the cluster."""
     return {
         'cluster_metadata': self.cluster_metadata,
-        'master_url': self.master_url
+        'master_url': self.master_url,
+        'dashboard': self.dashboard
     }
 
-  def get_master_url(self, identifier) -> None:
+  def get_cluster_details(self, cluster_metadata: MasterURLIdentifier):
+    """Gets the Dataproc_v1 Cluster object for the current cluster manager."""
+    try:
+      return self._cluster_client.get_cluster(
+          request={
+              'project_id': cluster_metadata.project_id,
+              'region': cluster_metadata.region,
+              'cluster_name': cluster_metadata.cluster_name
+          })
+    except Exception as e:
+      if e.code == 403:
+        _LOGGER.error(
+            'Due to insufficient project permissions, '
+            'unable to retrieve information for cluster: %s',
+            cluster_metadata.cluster_name)
+        raise ValueError(
+            'You cannot view clusters in project: {}'.format(
+                cluster_metadata.project_id))
+      elif e.code == 404:
+        _LOGGER.error(
+            'Cluster does not exist: %s', cluster_metadata.cluster_name)
+        raise ValueError(
+            'Cluster was not found: {}'.format(cluster_metadata.cluster_name))
+      else:
+        _LOGGER.error(
+            'Failed to get information for cluster: %s',
+            cluster_metadata.cluster_name)
+        raise e
+
+  def wait_for_cluster_to_provision(
+      self, cluster_metadata: MasterURLIdentifier) -> None:
+    while self.get_cluster_details(
+        cluster_metadata).status.state.name == 'CREATING':
+      time.sleep(15)
+
+  def get_staging_location(self, cluster_metadata: MasterURLIdentifier) -> str:
+    """Gets the staging bucket of an existing Dataproc cluster."""
+    try:
+      daemon_thread = Thread(
+          target=self.wait_for_cluster_to_provision(cluster_metadata))
+      daemon_thread.setDaemon(True)
+      daemon_thread.start()
+      cluster_details = self.get_cluster_details(cluster_metadata)
+      bucket_name = cluster_details.config.config_bucket
+      gcs_path = 'gs://' + bucket_name + '/google-cloud-dataproc-metainfo/'
+      for file in self._fs._list(gcs_path):
+        if cluster_metadata.cluster_name in file.path:
+          # this file path split will look something like:
+          # ['gs://.../google-cloud-dataproc-metainfo/{staging_dir}/',
+          # '-{node-type}/dataproc-startup-script_output']
+          return file.path.split(cluster_metadata.cluster_name)[0]
+    except Exception as e:
+      _LOGGER.error(
+          'Failed to get %s cluster staging bucket.',
+          cluster_metadata.cluster_name)
+      raise e
+
+  def parse_master_url_and_dashboard(

Review comment:
       Too bad there isn't an API for this. The log line does come from https://github.com/apache/flink/blob/master/flink-yarn/src/main/java/org/apache/flink/yarn/YarnClusterDescriptor.java#L1799, so for released Flink versions its format must be stable; we don't need to worry about the integration suddenly breaking.
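
       For reference, a minimal sketch of what parsing that log line could look like, assuming the message has the shape "Found Web Interface <host>:<port> of application '<app_id>'." as emitted by the linked YarnClusterDescriptor code. The function name, pattern, and sample log text below are illustrative only, not the PR's actual implementation:

```python
import re

# Assumption: the released-Flink YarnClusterDescriptor log message looks like
#   "Found Web Interface <host>:<port> of application '<app_id>'."
# The pattern and helper name here are hypothetical, for illustration only.
_WEB_INTERFACE_PATTERN = re.compile(
    r"Found Web Interface (?P<host>\S+):(?P<port>\d+) "
    r"of application '(?P<app_id>[^']+)'")


def parse_flink_master_from_line(line):
  """Returns (master_url, yarn_application_id) if the line matches the
  expected log message, otherwise None."""
  match = _WEB_INTERFACE_PATTERN.search(line)
  if not match:
    return None
  master_url = '{}:{}'.format(match.group('host'), match.group('port'))
  return master_url, match.group('app_id')


# Example with a made-up log line:
# parse_flink_master_from_line(
#     "INFO org.apache.flink.yarn.YarnClusterDescriptor [] - "
#     "Found Web Interface worker-0.internal:44639 "
#     "of application 'application_1646000000000_0001'.")
# -> ('worker-0.internal:44639', 'application_1646000000000_0001')
```

       Since the format is guaranteed only by the released Flink source rather than a public API, a test pinned to the exact log text of the supported Flink version would catch a format change early.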




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: github-unsubscribe@beam.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org