You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@climate.apache.org by jo...@apache.org on 2014/02/28 06:47:27 UTC

svn commit: r1572825 - /incubator/climate/trunk/ocw-ui/backend/processing.py

Author: joyce
Date: Fri Feb 28 05:47:26 2014
New Revision: 1572825

URL: http://svn.apache.org/r1572825
Log:
CLIMATE-333 - UI integration changes to processing

- Subset-ing is totally hopeless at the moment so it is being removed
  until it can be fixed.
- Remove unused math import.
- Update 'Bias' name in run_evaluation docs.
- Explicitly cast bounds values to avoid unwanted surprises.
- Move timestamp versioning out of the plotter helper so the value can
  be returned to the caller for redirection.
- Add timestamp return value to the caller.
- Update plotter helper function to accept timestamp value.
- Local dataset load helper was using the wrong name for the dataset_id.
  Update it to use the proper 'dataset_id'.
- Update the plotter helper documentation and add timestamp
  information.

Modified:
    incubator/climate/trunk/ocw-ui/backend/processing.py

Modified: incubator/climate/trunk/ocw-ui/backend/processing.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/ocw-ui/backend/processing.py?rev=1572825&r1=1572824&r2=1572825&view=diff
==============================================================================
--- incubator/climate/trunk/ocw-ui/backend/processing.py (original)
+++ incubator/climate/trunk/ocw-ui/backend/processing.py Fri Feb 28 05:47:26 2014
@@ -21,7 +21,7 @@ from datetime import timedelta, datetime
 import inspect
 import sys
 import os
-import math
+import json
 
 from bottle import Bottle, request, response
 
@@ -99,7 +99,7 @@ def run_evaluation():
 
             // A list of the metric class names to use in the evaluation. The
             // names must match the class name exactly.
-            'metrics': [BiasMetric, TemporalStdDev, ...]
+            'metrics': [Bias, TemporalStdDev, ...]
 
             // The bounding values used in the Evaluation. Note that lat values
             // should range from -180 to 180 and lon values from -90 to 90.
@@ -120,15 +120,16 @@ def run_evaluation():
     '''
     # TODO: validate input parameters and return an error if not valid
 
+    eval_time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
     data = request.json
 
     eval_bounds = {
         'start_time': datetime.strptime(data['start_time'], '%Y-%m-%d %H:%M:%S'),
         'end_time': datetime.strptime(data['end_time'], '%Y-%m-%d %H:%M:%S'),
-        'lat_min': data['lat_min'],
-        'lat_max': data['lat_max'],
-        'lon_min': data['lon_min'],
-        'lon_max': data['lon_max']
+        'lat_min': float(data['lat_min']),
+        'lat_max': float(data['lat_max']),
+        'lon_min': float(data['lon_min']),
+        'lon_max': float(data['lon_max'])
     }
 
     # Load all the datasets
@@ -137,6 +138,7 @@ def run_evaluation():
     target_datasets = [_process_dataset_object(obj, eval_bounds)
 					   for obj
 					   in data['target_datasets']]
+
     # Do temporal re-bin based off of passed resolution
     time_delta = timedelta(days=data['temporal_resolution'])
     ref_dataset = dsp.temporal_rebin(ref_dataset, time_delta)
@@ -144,19 +146,18 @@ def run_evaluation():
 					   for ds
 					   in target_datasets]
 
-    # Subset the datasets
-    subset = Bounds(eval_bounds['lat_min'],
-                    eval_bounds['lat_max'],
-                    eval_bounds['lon_min'],
-                    eval_bounds['lon_max'],
-                    eval_bounds['start_time'],
-                    eval_bounds['end_time'])
-
-    ref_dataset = dsp.subset(subset, ref_dataset)
-    target_datasets = [dsp.subset(subset, ds)
-					   for ds
-					   in target_datasets]
-
+    ## Subset the datasets
+    #subset = Bounds(eval_bounds['lat_min'],
+                    #eval_bounds['lat_max'],
+                    #eval_bounds['lon_min'],
+                    #eval_bounds['lon_max'],
+                    #eval_bounds['start_time'],
+                    #eval_bounds['end_time'])
+
+    #ref_dataset = dsp.subset(subset, ref_dataset)
+    #target_datasets = [dsp.subset(subset, ds)
+					   #for ds
+					   #in target_datasets]
 
     # Do spatial re=bin based off of reference dataset + lat/lon steps
     lat_step = data['spatial_rebin_lat_step']
@@ -180,7 +181,9 @@ def run_evaluation():
     evaluation.run()
 
     # Plot
-    _generate_evaluation_plots(evaluation, lat_bins, lon_bins)
+    _generate_evaluation_plots(evaluation, lat_bins, lon_bins, eval_time_stamp)
+
+    return json.dumps({'eval_work_dir': eval_time_stamp})
 
 def _process_dataset_object(dataset_object, eval_bounds):
     ''' Convert an dataset object representation into an OCW Dataset
@@ -257,7 +260,7 @@ def _load_local_dataset_object(dataset_i
     :param dataset_info: The necessary data to load a local dataset with
         ocw.data_source.local. Must be of the form:
         {
-            'id': The path to the local file for loading,
+            'dataset_id': The path to the local file for loading,
             'var_name': The variable data to pull from the file,
             'lat_name': The latitude variable name,
             'lon_name': The longitude variable name,
@@ -271,7 +274,7 @@ def _load_local_dataset_object(dataset_i
     :raises KeyError: If the required keys aren't present in the dataset_info.
     :raises ValueError: If data_source.local could not load the requested file.
     '''
-    path = dataset_info['id']
+    path = dataset_info['dataset_id']
     var_name = dataset_info['var_name']
     lat_name = dataset_info['lat_name']
     lon_name = dataset_info['lon_name']
@@ -407,7 +410,7 @@ def _get_valid_metric_options():
             for name, obj in inspect.getmembers(metrics)
             if inspect.isclass(obj) and name not in invalid_metrics}
 
-def _generate_evaluation_plots(evaluation, lat_bins, lon_bins):
+def _generate_evaluation_plots(evaluation, lat_bins, lon_bins, eval_time_stamp):
     ''' Generate the Evaluation's plots
 
     .. note: This doesn't support graphing evaluations with subregion data.
@@ -416,13 +419,15 @@ def _generate_evaluation_plots(evaluatio
     :type evaluation: ocw.evaluation.Evaluation
     :param lat_bins: The latitude bin values used in the evaluation.
     :type lat_bins: List
-    :type lon_bins: The longitude bin values used in the evaluation.
+    :param lon_bins: The longitude bin values used in the evaluation.
     :type lon_bins: List
+    :param eval_time_stamp: The time stamp for the directory where
+        evaluation results should be saved.
+    :type eval_time_stamp: Time stamp of the form '%Y-%m-%d_%H-%M-%S'
 
     :raises ValueError: If there aren't any results to graph.
     '''
     # Create time stamp version-ed WORK_DIR for plotting
-    eval_time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
     eval_path = os.path.join(WORK_DIR, eval_time_stamp)
     os.makedirs(eval_path)
 
@@ -451,7 +456,6 @@ def _generate_evaluation_plots(evaluatio
                 plot_title = _generate_binary_eval_plot_title(evaluation,
 															  dataset_index,
 															  metric_index)
-
                 plotter.draw_contour_map(results,
 										 lat_bins,
 										 lon_bins,