Posted to commits@climate.apache.org by go...@apache.org on 2013/09/09 18:41:04 UTC

svn commit: r1521186 - /incubator/climate/trunk/examples/simple_model_to_model_bias.py

Author: goodale
Date: Mon Sep  9 16:41:04 2013
New Revision: 1521186

URL: http://svn.apache.org/r1521186
Log:
Fixed an import statement that shadowed the Python built-in 'eval' by using it as a module alias
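
The following is a minimal, self-contained sketch of the shadowing problem the log message refers to. The standard-library 'json' module stands in for 'ocw.evaluation' (an assumption made only so the snippet runs without OCW installed); the point is that aliasing any import as 'eval' rebinds the name and hides the built-in.

    import json as eval  # rebinds 'eval', hiding the built-in function

    try:
        eval("1 + 1")  # 'eval' is now a module object, not the built-in
    except TypeError as err:
        print("built-in eval() is shadowed:", err)  # 'module' object is not callable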

Modified:
    incubator/climate/trunk/examples/simple_model_to_model_bias.py

Modified: incubator/climate/trunk/examples/simple_model_to_model_bias.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/examples/simple_model_to_model_bias.py?rev=1521186&r1=1521185&r2=1521186&view=diff
==============================================================================
--- incubator/climate/trunk/examples/simple_model_to_model_bias.py (original)
+++ incubator/climate/trunk/examples/simple_model_to_model_bias.py Mon Sep  9 16:41:04 2013
@@ -4,7 +4,7 @@ import numpy as np
 
 import ocw.data_source.local as local
 import ocw.dataset_processor as dsp
-import ocw.evaluation as eval
+import ocw.evaluation as evaluation
 import ocw.metrics as metrics
 import ocw.plotter as plotter
 
@@ -64,7 +64,7 @@ bias = metrics.Bias()
 # Evaluation can take in multiple targets and metrics, so we need to convert
 # our examples into Python lists.  Evaluation will iterate over the lists
 print("Making the Evaluation definition")
-bias_evaluation = eval.Evaluation(knmi_dataset, [wrf_dataset], [bias])
+bias_evaluation = evaluation.Evaluation(knmi_dataset, [wrf_dataset], [bias])
 print("Executing the Evaluation using the object's run() method")
 bias_evaluation.run()
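
The change in the second hunk keeps the example working exactly as the in-diff comments describe: Evaluation takes one reference dataset plus lists of target datasets and metrics, and run() iterates over those lists. A hedged sketch of that pattern follows; it assumes OCW is installed, the wrapper name run_bias_evaluation is illustrative rather than part of the example script, and dataset loading is elided because those lines are outside the hunks shown above.

    import ocw.evaluation as evaluation  # no longer shadows the built-in eval()
    import ocw.metrics as metrics

    def run_bias_evaluation(reference_dataset, target_datasets):
        """Run a Bias evaluation of each target dataset against the reference."""
        bias = metrics.Bias()
        # Evaluation accepts lists of targets and metrics and iterates over them.
        bias_evaluation = evaluation.Evaluation(reference_dataset,
                                                list(target_datasets),
                                                [bias])
        bias_evaluation.run()
        return bias_evaluation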