Posted to commits@climate.apache.org by jo...@apache.org on 2014/02/23 05:45:08 UTC

svn commit: r1570969 - in /incubator/climate/trunk/ocw-ui/backend: processing.py run_webservices.py

Author: joyce
Date: Sun Feb 23 04:45:08 2014
New Revision: 1570969

URL: http://svn.apache.org/r1570969
Log:
CLIMATE-332 - Begin plot testing

- Switch the route over to POST instead of GET (a client-side sketch
  follows this list).
- Update documentation for new POST requirements. Remove duplicate
  sections.
- Update all request data access to index into a temporary variable
  `data`. Remove all ast.literal_eval calls since Bottle decodes the
  JSON body for us automatically (a standalone sketch follows this list).
- Add initial subsetting code. The subset bounds are built but not yet
  applied; that will be wired in at a later date (a hedged sketch
  follows the processing.py diff below).
- Fix bug in _process_dataset_object where Dataset objects were being
  created and not returned.
- Fix grid_shape keyword arg typos (should be gridshape) in calls to the
  plotter code.
- Mount the processing app during web service initialization.
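
For reference, a minimal client-side sketch of calling the updated endpoint
once the processing app is mounted under /processing/. The host/port, the
dataset entries, and the metric name are illustrative placeholders, not
values taken from this commit; the field names and the
'%Y-%m-%d %H:%M:%S' time format follow the docstring in the diff below.

    import requests

    # Illustrative payload only; the dataset entry structure, metric name,
    # and host/port are assumptions, not part of this commit.
    payload = {
        'reference_dataset': {'data_source_id': 1, 'dataset_info': {}},
        'target_datasets': [{'data_source_id': 1, 'dataset_info': {}}],
        'spatial_rebin_lat_step': 1,
        'spatial_rebin_lon_step': 1,
        'temporal_resolution': 30,            # timedelta in days
        'metrics': ['Bias'],
        'start_time': '1989-01-01 00:00:00',  # parsed with '%Y-%m-%d %H:%M:%S'
        'end_time': '1991-01-01 00:00:00',
        'lat_min': -45.0,
        'lat_max': 42.0,
        'lon_min': -24.0,
        'lon_max': 60.0,
    }

    # Sending with json= sets Content-Type: application/json, which is what
    # lets Bottle populate request.json on the server side.
    response = requests.post('http://localhost:8082/processing/run_evaluation/',
                             json=payload)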

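The ast.literal_eval removal works because Bottle decodes a JSON request
body on its own; here is a tiny standalone sketch (the route name and the
echoed fields are hypothetical, chosen only to mirror the payload above):

    from bottle import Bottle, request

    demo_app = Bottle()

    @demo_app.route('/echo_bounds/', method='POST')
    def echo_bounds():
        # For an application/json body, request.json is already a dict of
        # native Python types (numbers, lists, nested dicts), so no
        # ast.literal_eval is needed to recover them from strings.
        data = request.json
        return {'lat_range': [data['lat_min'], data['lat_max']],
                'num_targets': len(data['target_datasets'])}
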
Modified:
    incubator/climate/trunk/ocw-ui/backend/processing.py
    incubator/climate/trunk/ocw-ui/backend/run_webservices.py

Modified: incubator/climate/trunk/ocw-ui/backend/processing.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/ocw-ui/backend/processing.py?rev=1570969&r1=1570968&r2=1570969&view=diff
==============================================================================
--- incubator/climate/trunk/ocw-ui/backend/processing.py (original)
+++ incubator/climate/trunk/ocw-ui/backend/processing.py Sun Feb 23 04:45:08 2014
@@ -17,8 +17,7 @@
 
 ''' Provides endpoints for running an OCW evaluation. '''
 
-from ast import literal_eval
-from datetime import timedelta
+from datetime import timedelta, datetime
 import inspect
 import sys
 import os
@@ -29,8 +28,9 @@ import ocw.data_source.local as local
 import ocw.data_source.rcmed as rcmed
 import ocw.dataset_processor as dsp
 from ocw.evaluation import Evaluation
+from ocw.dataset import Bounds
 import ocw.metrics as metrics
-#import ocw.plotter as plotter
+import ocw.plotter as plotter
 
 import numpy as np
 
@@ -39,12 +39,12 @@ processing_app = Bottle()
 # TODO: Factor this out of all but the main modules
 WORK_DIR = "/tmp/ocw"
 
-@processing_app.route('/run_evaluation/')
+@processing_app.route('/run_evaluation/', method='POST')
 def run_evaluation():
     ''' Run an OCW Evaluation.
 
-    run_evaluation expects the Evaluation parameters to be encoded in request
-    parameters with the following format.
+    run_evaluation expects the Evaluation parameters to be POSTed in
+    the following format.
 
     ..sourcecode: javascript
 
@@ -77,13 +77,13 @@ def run_evaluation():
             // reference_dataset above.
             'target_datasets': [{...}, {...}, ...],
 
-            // All the datasets are re-bin to the reference dataset
+            // All the datasets are re-binned to the reference dataset
             // before being added to an experiment. This step (in degrees)
-            // is used when re-bin both the reference and target datasets.
-            'spatial_rebin_lat_step': The lat degree step to use when re-bin,
+            // is used when re-binning both the reference and target datasets.
+            'spatial_rebin_lat_step': The lat degree step. Integer > 0,
 
             // Same as above, but for lon
-            'spatial_rebin_lon_step': The lon degree step to use when re-bin,
+            'spatial_rebin_lon_step': The lon degree step. Integer > 0,
 
             // The temporal resolution to use when doing a temporal re-bin
             // This is a timedelta of days to use so daily == 1, monthly is
@@ -103,11 +103,6 @@ def run_evaluation():
             'lon_min': The minimum longitude value,
             'lon_max': The maximum longitude value,
 
-            // The degree step that the latitude and longitude values should be
-            // re-binned to. Values must be > 0
-            'lat_degree_step': Integer > 0,
-            'lon_degree_step': Integer > 0,
-
             // NOTE: At the moment, subregion support is fairly minimal. This
             // will be addressed in the future. Ideally, the user should be able
             // to load a file that they have locally. That would change the
@@ -118,34 +113,43 @@ def run_evaluation():
     '''
     # TODO: validate input parameters and return an error if not valid
 
+    data = request.json
+
     eval_bounds = {
-        'start_time': request.query.start_time,
-        'end_time': request.query.end_time,
-        'lat_min': request.query.lat_min,
-        'lat_max': request.query.lat_max,
-        'lon_min': request.query.lon_min,
-        'lon_max': request.query.lon_max
+        'start_time': datetime.strptime(data['start_time'], '%Y-%m-%d %H:%M:%S'),
+        'end_time': datetime.strptime(data['end_time'], '%Y-%m-%d %H:%M:%S'),
+        'lat_min': data['lat_min'],
+        'lat_max': data['lat_max'],
+        'lon_min': data['lon_min'],
+        'lon_max': data['lon_max']
     }
 
     # Load all the datasets
-    ref_object = literal_eval(request.query['reference_dataset'])
-    ref_dataset = _process_dataset_object(ref_object, eval_bounds)
+    ref_dataset = _process_dataset_object(data['reference_dataset'], eval_bounds)
 
-    target_objects = literal_eval(request.query['target_datasets'])
     target_datasets = [_process_dataset_object(obj, eval_bounds)
 					   for obj
-					   in target_objects]
+					   in data['target_datasets']]
+
+    # TODO, Need to subset here! At the moment the start/end times aren't
+    # being used to subset.
+    subset = Bounds(eval_bounds['lat_min'],
+                    eval_bounds['lat_max'],
+                    eval_bounds['lon_min'],
+                    eval_bounds['lon_max'],
+                    eval_bounds['start_time'],
+                    eval_bounds['end_time'])
 
     # Do temporal re-bin based off of passed resolution
-    time_delta = timedelta(days=request.query['temporal_resolution'])
+    time_delta = timedelta(days=data['temporal_resolution'])
     ref_dataset = dsp.temporal_rebin(ref_dataset, time_delta)
     target_datasets = [dsp.temporal_rebin(ds, time_delta)
 					   for ds
 					   in target_datasets]
 
     # Do spatial re-bin based off of reference dataset + lat/lon steps
-    lat_step = request.query['lat_degree_step']
-    lon_step = request.query['lon_degree_step']
+    lat_step = data['spatial_rebin_lat_step']
+    lon_step = data['spatial_rebin_lon_step']
     lat_bins, lon_bins = _calculate_new_latlon_bins(eval_bounds,
 													lat_step,
 													lon_step)
@@ -156,7 +160,7 @@ def run_evaluation():
 						in target_datasets]
 
     # Load metrics
-    loaded_metrics = _load_metrics(literal_eval(request.query['metrics']))
+    loaded_metrics = _load_metrics(data['metrics'])
 
     # Prime evaluation object with data
     evaluation = Evaluation(ref_dataset, target_datasets, loaded_metrics)
@@ -216,10 +220,10 @@ def _process_dataset_object(dataset_obje
 
     # If we should load with local
     if source_id == 1:
-        _load_local_dataset_object(dataset_info)
+        return _load_local_dataset_object(dataset_info)
     # If we should load with RCMED
     elif source_id == 2:
-        _load_rcmed_dataset_object(dataset_info, eval_bounds)
+        return _load_rcmed_dataset_object(dataset_info, eval_bounds)
     else:
         cur_frame = sys._getframe().f_code
         err = "{}.{}: Invalid data_source_id - {}".format(
@@ -412,7 +416,7 @@ def _generate_evaluation_plots(evaluatio
 										 lon_bins,
 										 fname=file_name,
 										 ptitle=plot_title,
-                                         grid_shape=grid_shape)
+                                         gridshape=grid_shape)
 
     if evaluation.unary_results != []:
         for metric_index, metric in enumerate(evaluation.unary_metrics):
@@ -430,7 +434,7 @@ def _generate_evaluation_plots(evaluatio
 										   lon_bins,
 										   fname=file_name,
 										   ptitle=plot_title,
-                                           grid_shape=grid_shape)
+                                           gridshape=grid_shape)
 
 def _calculate_grid_shape(reference_dataset, max_cols=6):
     ''' Calculate the plot grid shape given a reference dataset. 

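The Bounds object built in run_evaluation() above is constructed but, per
the TODO, not yet applied. One way the subsetting might eventually be wired
in is sketched below; the dsp.subset argument order is an assumption and
should be checked against the installed ocw.dataset_processor, so treat
this as a hint, not part of the commit.

    # Hypothetical follow-up for the subsetting TODO in run_evaluation().
    # Assumes dsp.subset(bounds, dataset) returns a new, subsetted Dataset;
    # verify the signature against the ocw.dataset_processor version in use.
    ref_dataset = dsp.subset(subset, ref_dataset)
    target_datasets = [dsp.subset(subset, ds) for ds in target_datasets]
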
Modified: incubator/climate/trunk/ocw-ui/backend/run_webservices.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/ocw-ui/backend/run_webservices.py?rev=1570969&r1=1570968&r2=1570969&view=diff
==============================================================================
--- incubator/climate/trunk/ocw-ui/backend/run_webservices.py (original)
+++ incubator/climate/trunk/ocw-ui/backend/run_webservices.py Sun Feb 23 04:45:08 2014
@@ -20,11 +20,13 @@ from bottle import Bottle, response, sta
 from local_file_metadata_extractors import lfme_app
 from directory_helpers import dir_app
 from rcmed_helpers import rcmed_app
+from processing import processing_app
 
 app = Bottle()
 app.mount('/lfme/', lfme_app)
 app.mount('/dir/', dir_app)
 app.mount('/rcmed/', rcmed_app)
+app.mount('/processing/', processing_app)
 
 @app.route('/')
 @app.route('/index.html')