Posted to commits@sdap.apache.org by sk...@apache.org on 2023/11/16 21:22:22 UTC

(incubator-sdap-nexus) branch SDAP-500 created (now 4ab2f9b)

This is an automated email from the ASF dual-hosted git repository.

skperez pushed a change to branch SDAP-500
in repository https://gitbox.apache.org/repos/asf/incubator-sdap-nexus.git


      at 4ab2f9b  pagination improvements

This branch includes the following new commits:

     new 4ab2f9b  pagination improvements

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



(incubator-sdap-nexus) 01/01: pagination improvements

Posted by sk...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

skperez pushed a commit to branch SDAP-500
in repository https://gitbox.apache.org/repos/asf/incubator-sdap-nexus.git

commit 4ab2f9b4a4f72a34b922cb496cedb87e684335b7
Author: skorper <st...@gmail.com>
AuthorDate: Thu Nov 16 13:22:09 2023 -0800

    pagination improvements
---
 .../webservice/algorithms/doms/ExecutionStatus.py  |  5 ++-
 .../webservice/algorithms/doms/ResultsRetrieval.py |  2 ++
 analysis/webservice/algorithms_spark/Matchup.py    | 10 +++---
 analysis/webservice/apidocs/openapi.yml            | 14 ++++++++
 .../webservice/webmodel/NexusExecutionResults.py   | 40 ++++++++++++++--------
 5 files changed, 50 insertions(+), 21 deletions(-)

diff --git a/analysis/webservice/algorithms/doms/ExecutionStatus.py b/analysis/webservice/algorithms/doms/ExecutionStatus.py
index 17c6ca9..63cf423 100644
--- a/analysis/webservice/algorithms/doms/ExecutionStatus.py
+++ b/analysis/webservice/algorithms/doms/ExecutionStatus.py
@@ -42,6 +42,8 @@ class ExecutionStatusHandler(BaseDomsHandler.BaseDomsQueryCalcHandler):
         except ValueError:
             raise NexusProcessingException(reason='"id" argument must be a valid uuid', code=400)
 
+        filename = request.get_argument('filename', None)
+
         # Check if the job is done
         with ResultsRetrieval(self.config) as retrieval:
             try:
@@ -74,5 +76,6 @@ class ExecutionStatusHandler(BaseDomsHandler.BaseDomsQueryCalcHandler):
             host=host,
             num_primary_matched=execution_stats.get('numPrimaryMatched'),
             num_secondary_matched=execution_stats.get('numSecondaryMatched'),
-            num_unique_secondaries=execution_stats.get('numUniqueSecondaries')
+            num_unique_secondaries=execution_stats.get('numUniqueSecondaries'),
+            filename=filename
         )
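
For orientation, the new argument is exercised by appending it to the
status request. A minimal client-side sketch, assuming a local SDAP
deployment; host, port, and the execution id are illustrative
placeholders, not values taken from this commit:

    # Hypothetical status request against /job using the third-party
    # 'requests' HTTP client.
    import requests

    resp = requests.get(
        'http://localhost:8083/job',
        params={
            'id': 'c864a51b-3d87-4872-9070-632820b1cae2',  # placeholder uuid
            'filename': 'my_results',  # new optional argument
        },
    )
    # When 'filename' is supplied, the self link in the returned body
    # carries it forward.
    print(resp.json()['links'])
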
diff --git a/analysis/webservice/algorithms/doms/ResultsRetrieval.py b/analysis/webservice/algorithms/doms/ResultsRetrieval.py
index f03c1ca..cdec929 100644
--- a/analysis/webservice/algorithms/doms/ResultsRetrieval.py
+++ b/analysis/webservice/algorithms/doms/ResultsRetrieval.py
@@ -45,6 +45,8 @@ class DomsResultsRetrievalHandler(BaseDomsHandler.BaseDomsQueryCalcHandler):
 
         simple_results = computeOptions.get_boolean_arg("simpleResults", default=False)
 
+        filename = computeOptions.get_argument("filename", default=None)
+
         with ResultsStorage.ResultsRetrieval(self.config) as storage:
             params, stats, data = storage.retrieveResults(execution_id, trim_data=simple_results, page_num=page_num, page_size=page_size)
 
diff --git a/analysis/webservice/algorithms_spark/Matchup.py b/analysis/webservice/algorithms_spark/Matchup.py
index 8955d95..7c7f551 100644
--- a/analysis/webservice/algorithms_spark/Matchup.py
+++ b/analysis/webservice/algorithms_spark/Matchup.py
@@ -219,12 +219,13 @@ class Matchup(NexusCalcSparkTornadoHandler):
         end_seconds_from_epoch = int((end_time - EPOCH).total_seconds())
 
         prioritize_distance = request.get_boolean_arg("prioritizeDistance", default=True)
+        filename = request.get_argument('filename', default=None)
 
 
         return bounding_polygon, primary_ds_name, secondary_ds_names, parameter_s, \
                start_time, start_seconds_from_epoch, end_time, end_seconds_from_epoch, \
                depth_min, depth_max, time_tolerance, radius_tolerance, \
-               platforms, match_once, prioritize_distance
+               platforms, match_once, prioritize_distance, filename
 
     def get_job_pool(self, tile_ids):
         if len(tile_ids) > LARGE_JOB_THRESHOLD:
@@ -302,7 +303,7 @@ class Matchup(NexusCalcSparkTornadoHandler):
         bounding_polygon, primary_ds_name, secondary_ds_names, parameter_s, \
         start_time, start_seconds_from_epoch, end_time, end_seconds_from_epoch, \
         depth_min, depth_max, time_tolerance, radius_tolerance, \
-        platforms, match_once, prioritize_distance = self.parse_arguments(request)
+        platforms, match_once, prioritize_distance, filename = self.parse_arguments(request)
 
         args = {
             "primary": primary_ds_name,
@@ -375,9 +376,8 @@ class Matchup(NexusCalcSparkTornadoHandler):
             start=start,
             prioritize_distance=prioritize_distance
         ))
-
-        request.requestHandler.redirect(f'/job?id={execution_id}')
-
+        filename_param = f'&filename={filename}' if filename else ''
+        request.requestHandler.redirect(f'/job?id={execution_id}{filename_param}')
 
     @classmethod
     def convert_to_matches(cls, spark_result):
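
Note that the redirect above interpolates the raw filename into the
query string, so a value containing spaces, '&', or '#' would yield a
malformed URL. A sketch of a percent-encoded variant using only the
standard library (not part of this commit):

    from urllib.parse import quote

    filename = 'ocean temps & salinity'  # hypothetical user-supplied value
    filename_param = f'&filename={quote(filename)}' if filename else ''
    # -> '&filename=ocean%20temps%20%26%20salinity'
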
diff --git a/analysis/webservice/apidocs/openapi.yml b/analysis/webservice/apidocs/openapi.yml
index b719ad8..8c6efdc 100644
--- a/analysis/webservice/apidocs/openapi.yml
+++ b/analysis/webservice/apidocs/openapi.yml
@@ -166,6 +166,13 @@ paths:
             type: boolean
             default: true
           example: true
+        - in: query
+          name: filename
+          description: |
+            Optional filename. Will be passed into /job and results links
+          required: false
+          schema:
+            type: string
       responses:
         '200':
           description: Successful operation
@@ -689,6 +696,13 @@ paths:
             type: string
             format: uuid
           example: c864a51b-3d87-4872-9070-632820b1cae2
+        - in: query
+          name: filename
+          description: |
+            Optional filename. Will be passed into /job results links
+          required: false
+          schema:
+            type: string
   /job/cancel:
     get:
       summary: |
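
Both spec additions document the same optional string parameter on the
query. Its documented shape, built with the standard library and
reusing the example id already present in the spec:

    from urllib.parse import urlencode

    query = urlencode({
        'id': 'c864a51b-3d87-4872-9070-632820b1cae2',
        'filename': 'my_results',
    })
    print(f'/job?{query}')
    # /job?id=c864a51b-3d87-4872-9070-632820b1cae2&filename=my_results
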
diff --git a/analysis/webservice/webmodel/NexusExecutionResults.py b/analysis/webservice/webmodel/NexusExecutionResults.py
index 7dd7af9..47a891a 100644
--- a/analysis/webservice/webmodel/NexusExecutionResults.py
+++ b/analysis/webservice/webmodel/NexusExecutionResults.py
@@ -27,15 +27,17 @@ class ExecutionStatus(Enum):
     CANCELLED = 'cancelled'
 
 
-def construct_job_status(job_state, created, updated, execution_id, params, host, message=''):
+def construct_job_status(job_state, created, updated, execution_id, params, host, message='',
+                         filename=None):
+    filename_param = f'&filename={filename}' if filename else ''
     return {
         'status': job_state.value,
         'message': message,
         'createdAt': created,
         'updatedAt': updated,
         'links': [{
-            'href': f'{host}/job?id={execution_id}',
-            'title': 'The current page',
+            'href': f'{host}/job?id={execution_id}{filename_param}',
+            'title': 'Get job status - the current page',
             'type': 'application/json',
             'rel': 'self'
         }],
@@ -45,14 +47,15 @@ def construct_job_status(job_state, created, updated, execution_id, params, host
 
 
 def construct_done(status, created, completed, execution_id, params, host,
-                   num_primary_matched, num_secondary_matched, num_unique_secondaries):
+                   num_primary_matched, num_secondary_matched, num_unique_secondaries, filename):
     job_body = construct_job_status(
         status,
         created,
         completed,
         execution_id,
         params,
-        host
+        host,
+        filename=filename
     )
     # Add stats to body
     job_body['totalPrimaryMatched'] = num_primary_matched
@@ -61,6 +64,8 @@ def construct_done(status, created, completed, execution_id, params, host,
         if num_primary_matched > 0 else 0
     job_body['totalUniqueSecondaryMatched'] = num_unique_secondaries
 
+    filename_param = f'&filename={filename}' if filename else ''
+
     # Construct urls
     formats = [
         ('CSV', 'text/csv'),
@@ -68,8 +73,8 @@ def construct_done(status, created, completed, execution_id, params, host,
         ('NETCDF', 'binary/octet-stream')
     ]
     data_links = [{
-        'href': f'{host}/cdmsresults?id={execution_id}&output={output_format}',
-        'title': 'Download results',
+        'href': f'{host}/cdmsresults?id={execution_id}&output={output_format}{filename_param}',
+        'title': f'Download {output_format} results',
         'type': mime,
         'rel': 'data'
     } for output_format, mime in formats]
@@ -77,14 +82,15 @@ def construct_done(status, created, completed, execution_id, params, host,
     return job_body
 
 
-def construct_running(status, created, execution_id, params, host):
+def construct_running(status, created, execution_id, params, host, filename):
     job_body = construct_job_status(
         status,
         created,
         None,
         execution_id,
         params,
-        host
+        host,
+        filename=filename
     )
     job_body['links'].append({
         'href': f'{host}/job/cancel?id={execution_id}',
@@ -94,7 +100,7 @@ def construct_running(status, created, execution_id, params, host):
     return job_body
 
 
-def construct_error(status, created, completed, execution_id, message, params, host):
+def construct_error(status, created, completed, execution_id, message, params, host, filename):
     return construct_job_status(
         status,
         created,
@@ -102,25 +108,27 @@ def construct_error(status, created, completed, execution_id, message, params, h
         execution_id,
         params,
         host,
-        message
+        message,
+        filename=filename
     )
 
 
-def construct_cancelled(status, created, completed, execution_id, params, host):
+def construct_cancelled(status, created, completed, execution_id, params, host, filename):
     return construct_job_status(
         status,
         created,
         completed,
         execution_id,
         params,
-        host
+        host,
+        filename=filename
     )
 
 
 class NexusExecutionResults:
     def __init__(self, status=None, created=None, completed=None, execution_id=None, message='',
                  params=None, host=None, status_code=200, num_primary_matched=None,
-                 num_secondary_matched=None, num_unique_secondaries=None):
+                 num_secondary_matched=None, num_unique_secondaries=None, filename=None):
         self.status_code = status_code
         self.status = status
         self.created = created
@@ -132,6 +140,7 @@ class NexusExecutionResults:
         self.num_primary_matched = num_primary_matched
         self.num_secondary_matched = num_secondary_matched
         self.num_unique_secondaries = num_unique_secondaries
+        self.filename = filename
 
     def toJson(self):
         params = {
@@ -139,7 +148,8 @@ class NexusExecutionResults:
             'created': self.created,
             'execution_id': self.execution_id,
             'params': self.execution_params,
-            'host': self.host
+            'host': self.host,
+            'filename': self.filename
         }
         if self.status == ExecutionStatus.SUCCESS:
             params['completed'] = self.completed
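
Taken together: when a filename is supplied, the status body built by
construct_job_status carries it on the self link (and construct_done
adds it to each download link), roughly as follows. Abridged; host and
id are illustrative:

    {
        "status": "running",
        "message": "",
        "createdAt": "...",
        "updatedAt": null,
        "links": [{
            "href": "http://localhost:8083/job?id=c864a51b-3d87-4872-9070-632820b1cae2&filename=my_results",
            "title": "Get job status - the current page",
            "type": "application/json",
            "rel": "self"
        }]
    }
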