Posted to commits@climate.apache.org by jo...@apache.org on 2013/08/12 19:06:36 UTC

svn commit: r1513202 - /incubator/climate/branches/RefactorInput/ocw/evaluation.py

Author: joyce
Date: Mon Aug 12 17:06:36 2013
New Revision: 1513202

URL: http://svn.apache.org/r1513202
Log:
CLIMATE-214 - Minor Evaluation.py refactor

Modified:
    incubator/climate/branches/RefactorInput/ocw/evaluation.py

Modified: incubator/climate/branches/RefactorInput/ocw/evaluation.py
URL: http://svn.apache.org/viewvc/incubator/climate/branches/RefactorInput/ocw/evaluation.py?rev=1513202&r1=1513201&r2=1513202&view=diff
==============================================================================
--- incubator/climate/branches/RefactorInput/ocw/evaluation.py (original)
+++ incubator/climate/branches/RefactorInput/ocw/evaluation.py Mon Aug 12 17:06:36 2013
@@ -185,17 +185,7 @@ class Evaluation:
                 self.results = self._run_no_subregion_evaluation()
 
         if self._should_run_unary_metrics():
-            self.unary_results = []
-
-            for metric in self.unary_metrics:
-                self.unary_results.append([])
-                # Unary metrics should be run over the reference Dataset also
-                if self.ref_dataset:
-                    self.unary_results[-1].append(metric.run(ref_dataset))
-
-                for target in self.target_datasets:
-                    self.unary_results[-1].append(metric.run(target))
-
+            self.unary_results = self._run_unary_metric_evaluation()
 
     def _evaluation_is_valid(self):
         '''Check if the evaluation is well-formed.
@@ -250,3 +240,14 @@ class Evaluation:
                 results[-1].append(run_result)
         return results
 
+    def _run_unary_metric_evaluation(self):
+        unary_results = []
+        for metric in self.unary_metrics:
+            unary_results.append([])
+            # Unary metrics should be run over the reference Dataset also
+            if self.ref_dataset:
+                unary_results[-1].append(metric.run(self.ref_dataset))
+
+            for target in self.target_datasets:
+                unary_results[-1].append(metric.run(target))
+        return unary_results
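
For context, below is a minimal self-contained sketch of the pattern this refactor moves toward: per-metric result collection pulled out into a helper that returns a nested list indexed as unary_results[metric_index][dataset_index], with the reference dataset (when present) contributing the first entry in each list. The Dataset and MaxValue classes here are hypothetical stand-ins chosen for illustration, not OCW's actual API.

# A minimal sketch of the refactored unary-metric pattern.
# Dataset and MaxValue are hypothetical stand-ins, not OCW's real classes.

class Dataset:
    def __init__(self, name, values):
        self.name = name
        self.values = values

class MaxValue:
    '''Toy unary metric: maps a single dataset to one number.'''
    def run(self, dataset):
        return max(dataset.values)

class Evaluation:
    def __init__(self, ref_dataset, target_datasets, unary_metrics):
        self.ref_dataset = ref_dataset
        self.target_datasets = target_datasets
        self.unary_metrics = unary_metrics
        self.unary_results = []

    def run(self):
        if self.unary_metrics:
            self.unary_results = self._run_unary_metric_evaluation()

    def _run_unary_metric_evaluation(self):
        # One result list per metric; the reference dataset, when
        # present, is evaluated first in each list.
        unary_results = []
        for metric in self.unary_metrics:
            unary_results.append([])
            if self.ref_dataset:
                unary_results[-1].append(metric.run(self.ref_dataset))
            for target in self.target_datasets:
                unary_results[-1].append(metric.run(target))
        return unary_results

if __name__ == '__main__':
    ref = Dataset('ref', [1, 2, 3])
    targets = [Dataset('t1', [4, 5]), Dataset('t2', [0, 9])]
    evaluation = Evaluation(ref, targets, [MaxValue()])
    evaluation.run()
    print(evaluation.unary_results)  # [[3, 5, 9]]

Returning the nested list from the helper, rather than appending to self.unary_results inside the loop, keeps run() as the single place where evaluation state is assigned, which appears to be the point of this refactor.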