Posted to commits@climate.apache.org by hu...@apache.org on 2015/08/25 19:06:53 UTC

[3/7] climate git commit: subregion evaluation has been updated - one less hierarchy in the result list

subregion evaluation has been updated - one less hierarchy in the result list


Project: http://git-wip-us.apache.org/repos/asf/climate/repo
Commit: http://git-wip-us.apache.org/repos/asf/climate/commit/d120a8f8
Tree: http://git-wip-us.apache.org/repos/asf/climate/tree/d120a8f8
Diff: http://git-wip-us.apache.org/repos/asf/climate/diff/d120a8f8

Branch: refs/heads/master
Commit: d120a8f8ca9229bafcb51b927d81a90f5e900c3c
Parents: 19e8d0c
Author: huikyole <hu...@argo.jpl.nasa.gov>
Authored: Tue Aug 18 17:21:04 2015 -0700
Committer: huikyole <hu...@argo.jpl.nasa.gov>
Committed: Tue Aug 18 17:21:04 2015 -0700

----------------------------------------------------------------------
 ocw/evaluation.py            | 70 ++++++++++++++++++++-------------------
 ocw/tests/test_evaluation.py | 31 ++++-------------
 2 files changed, 42 insertions(+), 59 deletions(-)
----------------------------------------------------------------------
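In short, this commit removes one level of nesting from the results list: results are now indexed as results[subregion][metric], and each entry is a single numpy masked array whose first axis runs over the target datasets (previously the layout was results[target][metric][subregion], with one metric result per triple). The self-contained numpy sketch below is illustrative only; it is not part of the commit and all sizes are made up, but it shows the new layout:

    import numpy.ma as ma

    num_subregions, num_metrics, num_targets = 2, 3, 4
    metric_output_shape = (5, 6)   # e.g. a (lat, lon) grid returned by a metric

    results = []
    for _ in range(num_subregions):
        subregion_results = []
        for _ in range(num_metrics):
            # one masked array per (subregion, metric); axis 0 indexes the targets
            subregion_results.append(ma.zeros((num_targets,) + metric_output_shape))
        results.append(subregion_results)

    assert len(results) == num_subregions          # outer level: subregions
    assert len(results[0]) == num_metrics          # inner level: metrics
    assert results[0][0].shape[0] == num_targets   # axis 0: target datasets

Packing all targets into one masked array per (subregion, metric) pair lets downstream code slice or reduce across targets with ordinary numpy operations instead of walking a third list level.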


http://git-wip-us.apache.org/repos/asf/climate/blob/d120a8f8/ocw/evaluation.py
----------------------------------------------------------------------
diff --git a/ocw/evaluation.py b/ocw/evaluation.py
index becced1..760325d 100644
--- a/ocw/evaluation.py
+++ b/ocw/evaluation.py
@@ -98,8 +98,10 @@ class Evaluation(object):
         #: The shape of results list is ``len(results_list) = num_metrics``.
         #: Each element of the list is a numpy.Masked array whose first dimension 
         #: is num_target_datasets.  
-        #: If the user specify subregion information, the shape
-        #: is ``(num_target_datasets, num_metrics, num_subregions)``.
+        #: If the user specifies subregion information, the shape of the results
+        #: list is list[num_subregions][num_metrics]. Each element of this
+        #: hierarchical list is also a numpy.Masked array whose first dimension
+        #: is num_target_datasets.
         self.results = []
         #: A list containing the results of running the unary metric 
         #: evaluations. The shape of unary_results is 
@@ -274,21 +276,21 @@ class Evaluation(object):
 
     def _run_subregion_evaluation(self):
         results = []
-        new_refs = [DSP.subset(s, self.ref_dataset) for s in self.subregions]
-
-        for target in self.target_datasets:
-            results.append([])
-            new_targets = [DSP.subset(s, target) for s in self.subregions]
-
+        for s in self.subregions:
+            subregion_results = []
+            new_refs = DSP.subset(s, self.ref_dataset)
+            new_target0 = DSP.subset(s, self.target_datasets[0])
             for metric in self.metrics:
-                results[-1].append([])
-
-                for i in range(len(self.subregions)):
-                    new_ref = new_refs[i]
-                    new_tar = new_targets[i]
+                run_result_shape = list((metric.run(new_refs, new_target0)).shape)
+                run_result_shape.insert(0, len(self.target_datasets))
+                run_result = ma.zeros(run_result_shape)
+     
+                for itarget, target in enumerate(self.target_datasets):
+                    new_target = DSP.subset(s, target)
+                    run_result[itarget,:] = metric.run(new_refs, new_target)
+                subregion_results.append(run_result)
+            results.append(subregion_results)
 
-                    run_result = metric.run(new_ref, new_tar)
-                    results[-1][-1].append(run_result)
         return results
 
     def _run_no_subregion_evaluation(self):
@@ -321,26 +323,26 @@ class Evaluation(object):
 
     def _run_subregion_unary_evaluation(self):
         unary_results = []
-        if self.ref_dataset:
-            new_refs = [DSP.subset(s, self.ref_dataset) for s in self.subregions]
-
-        new_targets = [
-            [DSP.subset(s, t) for s in self.subregions]
-            for t in self.target_datasets
-        ]
-
-        for metric in self.unary_metrics:
-            unary_results.append([])
-
-            for i in range(len(self.subregions)):
-                unary_results[-1].append([])
-
+        for s in self.subregions:
+            subregion_results = []
+            for metric in self.unary_metrics:
+                unary_result_shape = list((metric.run(self.target_datasets[0])).shape)
                 if self.ref_dataset:
-                    unary_results[-1][-1].append(metric.run(new_refs[i]))
-
-                for t in range(len(self.target_datasets)):
-                    unary_results[-1][-1].append(metric.run(new_targets[t][i]))
-
+                    unary_result_shape.insert(0, len(self.target_datasets)+1)
+                    num_refs = 1
+                else: 
+                    unary_result_shape.insert(0, len(self.target_datasets))
+                    num_refs = 0
+                unary_result = ma.zeros(unary_result_shape)
+                for itarget, target in enumerate(self.target_datasets):
+                    new_target = DSP.subset(s, target)
+                    unary_result[itarget+num_refs,:] = metric.run(new_target)
+                if self.ref_dataset:
+                    new_refs = DSP.subset(s, self.ref_dataset)
+                    unary_result[0,:] = metric.run(new_refs)
+                
+                subregion_results.append(unary_result)
+            unary_results.append(subregion_results)
         return unary_results
 
     def __str__(self):

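The unary path follows the same convention, except that when a reference dataset is present it occupies row 0 of each masked array, ahead of the target datasets. A minimal sketch of that layout, again with hypothetical sizes rather than code taken from the commit:

    import numpy.ma as ma

    num_targets = 2
    has_reference = True
    metric_output_shape = (12,)   # e.g. the output of a temporal metric

    # the reference dataset (when present) occupies row 0, targets follow
    rows = num_targets + (1 if has_reference else 0)
    unary_result = ma.zeros((rows,) + metric_output_shape)

    reference_result = unary_result[0]   # only meaningful when has_reference is True
    target_results = unary_result[1:] if has_reference else unary_result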
http://git-wip-us.apache.org/repos/asf/climate/blob/d120a8f8/ocw/tests/test_evaluation.py
----------------------------------------------------------------------
diff --git a/ocw/tests/test_evaluation.py b/ocw/tests/test_evaluation.py
index c2b85de..566de86 100644
--- a/ocw/tests/test_evaluation.py
+++ b/ocw/tests/test_evaluation.py
@@ -168,26 +168,14 @@ class TestEvaluation(unittest.TestCase):
 
         # Expected result shape is
         # [
-        #   [
         #       [   # Subregions cause this extra layer
-        #           bias.run(reference, target1)
+        #           [number of targets, bias.run(reference, target1).shape]
         #       ]
         #   ],
-        #   [
-        #       [
-        #           bias.run(reference, target2)
-        #       ]
-        #   ]
-        # ]
-        self.assertTrue(len(bias_eval.results) == 2)
-
+        self.assertTrue(len(bias_eval.results) == 1)
         self.assertTrue(len(bias_eval.results[0]) == 1)
-        self.assertTrue(type(bias_eval.results[0]) == type([]))
-        self.assertTrue(len(bias_eval.results[1]) == 1)
-        self.assertTrue(type(bias_eval.results[1]) == type([]))
-
-        self.assertTrue(len(bias_eval.results[0][0]) == 1)
-        self.assertTrue(len(bias_eval.results[1][0]) == 1)
+        self.assertTrue(bias_eval.results[0][0].shape[0] == 2)
+        self.assertTrue(type(bias_eval.results) == type([]))
 
     def test_subregion_unary_result_shape(self):
         bound = Bounds(
@@ -205,20 +193,13 @@ class TestEvaluation(unittest.TestCase):
 
         # Expected result shape is
         # [
-        #   [   
         #       [   # Subregions cause this extra layer
-        #           temporalstddev.run(reference),
-        #           temporalstddev.run(target1),
-        #           temporalstddev.run(target2)
+        #           [3, temporalstddev.run(reference).shape],
         #       ]
-        #   ]
         # ]
         self.assertTrue(len(new_eval.unary_results) == 1)
         self.assertTrue(type(new_eval.unary_results) == type([]))
-
-        self.assertTrue(len(new_eval.unary_results[0]) == 1)
-
-        self.assertTrue(len(new_eval.unary_results[0][0]) == 3)
+        self.assertTrue(new_eval.unary_results[0][0].shape[0] == 3)
 
 
 if __name__  == '__main__':
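
Taken together, the two updated tests check the same convention end to end. The stand-alone numpy fragment below (illustrative sizes only, not OCW code) reproduces the shapes the new assertions expect:

    import numpy.ma as ma

    # one subregion, one binary metric, two targets -> results[0][0] has 2 rows
    results = [[ma.zeros((2, 4, 4))]]
    assert len(results) == 1
    assert len(results[0]) == 1
    assert results[0][0].shape[0] == 2

    # one subregion, one unary metric, reference + two targets -> 3 rows
    unary_results = [[ma.zeros((3, 4, 4))]]
    assert len(unary_results) == 1
    assert unary_results[0][0].shape[0] == 3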