Posted to commits@climate.apache.org by jo...@apache.org on 2013/08/16 19:02:28 UTC

svn commit: r1514788 - /incubator/climate/branches/RefactorInput/ocw/evaluation.py

Author: joyce
Date: Fri Aug 16 17:02:28 2013
New Revision: 1514788

URL: http://svn.apache.org/r1514788
Log:
CLIMATE-256 - Use Bounds object in Evaluation.

Modified:
    incubator/climate/branches/RefactorInput/ocw/evaluation.py

Modified: incubator/climate/branches/RefactorInput/ocw/evaluation.py
URL: http://svn.apache.org/viewvc/incubator/climate/branches/RefactorInput/ocw/evaluation.py?rev=1514788&r1=1514787&r2=1514788&view=diff
==============================================================================
--- incubator/climate/branches/RefactorInput/ocw/evaluation.py (original)
+++ incubator/climate/branches/RefactorInput/ocw/evaluation.py Fri Aug 16 17:02:28 2013
@@ -22,10 +22,10 @@ Classes: 
 
 import logging
 from metrics import Metric
-from dataset import Dataset
+from dataset import Dataset, Bounds
 import dataset_processor as DSP
 
-class Evaluation:
+class Evaluation(object):
     '''Container for running an evaluation
 
     An *Evaluation* is the running of one or more metrics on one or more 
@@ -60,9 +60,8 @@ class Evaluation:
                 in the evaluation.
         :type metrics: List of Metrics
         :param subregions: (Optional) Subregion information to use in the
-                evaluation. A subregion is specified by a bounds of the form
-                [latMin, lonMin, latMax, lonMax].
-        :type subregions: List of bounds 
+                evaluation. A subregion is specified with a Bounds object.
+        :type subregions: List of Bounds objects
 
         :raises: ValueError 
         '''
@@ -86,7 +85,7 @@ class Evaluation:
 
         #: An optional list of subregion bounds to use when running the
         #: evaluation. 
-        self.subregions = subregions
+        self._subregions = subregions
 
         #: A list containing the results of running regular metric evaluations. 
         #: The shape of results is ``(num_metrics, num_target_datasets)`` if
@@ -99,6 +98,23 @@ class Evaluation:
         #: num_target_ds + (1 if ref_dataset != None else 0``
         self.unary_results = []
 
+    @property
+    def subregions(self):
+        return self._subregions
+
+    @subregions.setter
+    def subregions(self, value):
+        # If the value is None, we don't need to check that it's well formed!
+        if value:
+            # All of the values passed in the iterable better be Bounds!
+            if not all([isinstance(bound, Bounds) for bound in value]):
+                error = (
+                    "Found invalid subregion information. Expected "
+                    "value to be an instance of Bounds."
+                )
+                raise ValueError(error)
+        self._subregions = value
+
     def add_dataset(self, target_dataset):
         '''Add a Dataset to the Evaluation.
 
@@ -225,8 +241,9 @@ class Evaluation:
                 for subregion in self.subregions:
                     # Subset the reference and target dataset with the 
                     # subregion information.
-                    new_ref, new_tar = DSP.subset(subregion,
-                                                  [self.ref_dataset, target])
+                    new_ref = DSP.subset(subregion, self.ref_dataset)
+                    new_tar = DSP.subset(subregion, target)
+
                     run_result = [metric.run(new_ref, new_tar)]
                     results[-1][-1].append(run_result)
         return results