Posted to commits@climate.apache.org by go...@apache.org on 2013/08/28 20:21:02 UTC
svn commit: r1518315 - in /incubator/climate/trunk/ocw: evaluation.py metrics.py tests/test_evaluation.py
Author: goodale
Date: Wed Aug 28 18:21:01 2013
New Revision: 1518315
URL: http://svn.apache.org/r1518315
Log:
CLIMATE-281: Updating Evaluation Class (and Metrics)
* Added a test to cover Evaluation.run() method using a simple Bias() calculation
* Updated all the methods in Evaluation so they properly call their helper methods through self (a minimal sketch of why this is needed follows the log)
* Fixed a nested-list bug when collecting metric.run() results within _run_no_subregion_evaluation
* Updated the docstrings and return values in metrics.py so all metrics now return NumPy arrays across the board
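
For context on the self change, here is a minimal sketch, using a hypothetical class rather than OCW code, of why the unqualified helper calls failed: inside a method, a helper defined on the class must be reached through self, otherwise Python looks for a module-level name and raises NameError at call time.

    # Minimal sketch (hypothetical class, not OCW code) of the bound-method fix.
    class Example(object):
        def _helper(self):
            return True

        def run_broken(self):
            # NameError: '_helper' only exists as an attribute of the class,
            # not as a module-level name, so this lookup fails when called.
            return _helper()

        def run_fixed(self):
            # Correct: calling through self resolves to the bound method.
            return self._helper()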
Modified:
incubator/climate/trunk/ocw/evaluation.py
incubator/climate/trunk/ocw/metrics.py
incubator/climate/trunk/ocw/tests/test_evaluation.py
Modified: incubator/climate/trunk/ocw/evaluation.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/ocw/evaluation.py?rev=1518315&r1=1518314&r2=1518315&view=diff
==============================================================================
--- incubator/climate/trunk/ocw/evaluation.py (original)
+++ incubator/climate/trunk/ocw/evaluation.py Wed Aug 28 18:21:01 2013
@@ -222,14 +222,14 @@ class Evaluation(object):
logging.warning(error)
return
- if _should_run_regular_metrics():
+ if self._should_run_regular_metrics():
if self.subregions:
- self.results = _run_subregion_evaluation()
+ self.results = self._run_subregion_evaluation()
else:
- self.results = _run_no_subregion_evaluation()
+ self.results = self._run_no_subregion_evaluation()
- if _should_run_unary_metrics():
- self.unary_results = _run_unary_metric_evaluation()
+ if self._should_run_unary_metrics():
+ self.unary_results = self._run_unary_metric_evaluation()
def _evaluation_is_valid(self):
'''Check if the evaluation is well-formed.
@@ -240,8 +240,8 @@ class Evaluation(object):
* If there is a regular metric there must be a reference dataset and
at least one target dataset.
'''
- run_reg = _should_run_regular_metrics()
- run_unary = _should_run_unary_metrics()
+ run_reg = self._should_run_regular_metrics()
+ run_unary = self._should_run_unary_metrics()
reg_valid = self.ref_dataset != None and len(self.target_datasets) > 0
unary_valid = self.ref_dataset != None or len(self.target_datasets) > 0
@@ -254,13 +254,13 @@ class Evaluation(object):
else:
return False
- def _should_run_regular_metrics():
+ def _should_run_regular_metrics(self):
return len(self.metrics) > 0
- def _should_run_unary_metrics():
+ def _should_run_unary_metrics(self):
return len(self.unary_metrics) > 0
- def _run_subregion_evaluation():
+ def _run_subregion_evaluation(self):
results = []
for target in self.target_datasets:
results.append([])
@@ -276,16 +276,17 @@ class Evaluation(object):
results[-1][-1].append(run_result)
return results
- def _run_no_subregion_evaluation():
+ def _run_no_subregion_evaluation(self):
results = []
for target in self.target_datasets:
results.append([])
for metric in self.metrics:
- run_result = [metric.run(self.ref_dataset, target)]
+ datasets = (self.ref_dataset, target)
+ run_result = metric.run(datasets)
results[-1].append(run_result)
return results
- def _run_unary_metric_evaluation():
+ def _run_unary_metric_evaluation(self):
unary_results = []
for metric in self.unary_metrics:
unary_results.append([])
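
To make the effect of the nested-list fix concrete, here is a minimal sketch using plain NumPy arrays in place of OCW Dataset objects (array shapes are illustrative): after the change, _run_no_subregion_evaluation stores each metric's return value directly, so results[target_index][metric_index] is the NumPy array itself rather than a one-element list wrapping it.

    import numpy as np

    # Stand-ins for ref_dataset.values and target.values (time x lat x lon).
    ref_values = np.ones((12, 5, 5))
    target_values = np.zeros((12, 5, 5))

    run_result = ref_values - target_values   # what Bias.run((ref, target)) now returns
    results = [[run_result]]                  # one target dataset, one metric

    assert isinstance(results[0][0], np.ndarray)    # previously a one-element list
    assert results[0][0].shape == ref_values.shape

Storing the array directly also lets the new shape test compare results[0][0].shape against the input dataset's values without unwrapping an extra list level.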
Modified: incubator/climate/trunk/ocw/metrics.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/ocw/metrics.py?rev=1518315&r1=1518314&r2=1518315&view=diff
==============================================================================
--- incubator/climate/trunk/ocw/metrics.py (original)
+++ incubator/climate/trunk/ocw/metrics.py Wed Aug 28 18:21:01 2013
@@ -46,8 +46,8 @@ class Metric():
binary, then datasets[0] contains the reference dataset and \
datasets[1] contains the target dataset.
:type datasets: Tuple
- :returns: A list containing the results of running the metric.
- :trype: List
+ :returns: An Array containing the results of running the metric.
+ :rtype: Numpy Array
'''
@@ -72,11 +72,11 @@ class Bias(Metric):
reference dataset is given in datasets[0] and the target \
dataset is given in datasets[1].
:type datasets: Tuple
- :returns: A list containing the difference between the reference \
+ :returns: An array containing the difference between the reference \
dataset and the target dataset.
- :rtype: List
+ :rtype: Numpy Array
'''
- return [datasets[0].values - datasets[1].values]
+ return datasets[0].values - datasets[1].values
class TemporalStdDev(Metric):
@@ -99,7 +99,7 @@ class TemporalStdDev(Metric):
:param datasets: The datasets on which to calculate the temporal \
std. dev. in datasets[0].
:type datasets: Tuple
- :returns: A list containing the temporal std. dev.
- :rtype: List
+ :returns: An array containing the temporal std. dev.
+ :rtype: Numpy Array
'''
return datasets[0].values.std(axis=0, ddof=1)
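
As a quick sanity check of the new return contract, here is a minimal sketch using plain NumPy arrays as stand-ins for Dataset.values (shapes are illustrative): Bias now returns the raw difference array, and TemporalStdDev returns the standard deviation taken over the time axis.

    import numpy as np

    # Stand-ins for Dataset.values arrays (time x lat x lon); the Dataset
    # wrapper itself is omitted for brevity.
    ref = np.arange(24, dtype=float).reshape(2, 3, 4)
    target = np.ones((2, 3, 4))

    bias = ref - target                      # mirrors what Bias.run() now returns
    temporal_std = ref.std(axis=0, ddof=1)   # mirrors TemporalStdDev.run()

    assert isinstance(bias, np.ndarray) and bias.shape == ref.shape
    assert temporal_std.shape == ref.shape[1:]   # the time axis is collapsed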
Modified: incubator/climate/trunk/ocw/tests/test_evaluation.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/ocw/tests/test_evaluation.py?rev=1518315&r1=1518314&r2=1518315&view=diff
==============================================================================
--- incubator/climate/trunk/ocw/tests/test_evaluation.py (original)
+++ incubator/climate/trunk/ocw/tests/test_evaluation.py Wed Aug 28 18:21:01 2013
@@ -117,6 +117,13 @@ class TestEvaluation(unittest.TestCase):
self.assertEqual(len(self.eval.metrics), 0)
self.eval.add_metrics([Bias(), Bias()])
self.assertEqual(len(self.eval.metrics), 2)
+
+ def test_bias_output_shape(self):
+ bias_eval = Evaluation(self.test_dataset, [self.another_test_dataset], [Bias()])
+ bias_eval.run()
+ input_shape = tuple(self.test_dataset.values.shape)
+ bias_results_shape = tuple(bias_eval.results[0][0].shape)
+ self.assertEqual(input_shape, bias_results_shape)
if __name__ == '__main__':
unittest.main()
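
For readers who want to reproduce the new shape check outside the test suite, the following sketch runs a Bias evaluation end to end. The Dataset constructor arguments and module paths below are assumptions for illustration only; the actual fixtures are built in TestEvaluation.setUp().

    # Hedged end-to-end sketch of what test_bias_output_shape exercises.
    import datetime
    import numpy as np
    from ocw.dataset import Dataset          # module path assumed
    from ocw.evaluation import Evaluation
    from ocw.metrics import Bias

    lats = np.arange(-45.0, 45.0)            # 90 latitude points
    lons = np.arange(-90.0, 90.0)            # 180 longitude points
    times = np.array([datetime.datetime(2000, m, 1) for m in (1, 2, 3)])
    values = np.zeros((3, 90, 180))

    ref = Dataset(lats, lons, times, values)          # constructor assumed
    target = Dataset(lats, lons, times, values + 1.0)

    bias_eval = Evaluation(ref, [target], [Bias()])
    bias_eval.run()

    # With the nested-list fix, results[0][0] is the bias array itself and
    # its shape matches the input dataset's values.
    assert bias_eval.results[0][0].shape == values.shape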