Posted to commits@climate.apache.org by jo...@apache.org on 2015/03/31 16:59:22 UTC

[03/13] climate git commit: CLIMATE-581 - Add API for metric export from Evaluation

CLIMATE-581 - Add API for metric export from Evaluation


Project: http://git-wip-us.apache.org/repos/asf/climate/repo
Commit: http://git-wip-us.apache.org/repos/asf/climate/commit/0df15460
Tree: http://git-wip-us.apache.org/repos/asf/climate/tree/0df15460
Diff: http://git-wip-us.apache.org/repos/asf/climate/diff/0df15460

Branch: refs/heads/master
Commit: 0df15460b27c756ab20a761560b80a79a7b78db8
Parents: 41879f9
Author: Michael Joyce <jo...@apache.org>
Authored: Tue Mar 24 16:00:18 2015 -0700
Committer: Michael Joyce <jo...@apache.org>
Committed: Tue Mar 24 16:00:18 2015 -0700

----------------------------------------------------------------------
 ocw-config-runner/configuration_writer.py | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/climate/blob/0df15460/ocw-config-runner/configuration_writer.py
----------------------------------------------------------------------
diff --git a/ocw-config-runner/configuration_writer.py b/ocw-config-runner/configuration_writer.py
index ad14a91..bf1b8a0 100644
--- a/ocw-config-runner/configuration_writer.py
+++ b/ocw-config-runner/configuration_writer.py
@@ -55,6 +55,21 @@ def generate_dataset_information(dataset):
 
     return info
 
+def generate_metric_information(evaluation):
+    ''' Generate metric config file output from a given Evaluation object.
+
+    :param evaluation: The evaluation object from which to extract metrics.
+    :type evaluation: :class:`evaluation.Evaluation`
+
+    :returns: A :func:`list` of metric class names (as strings) for
+        output into a configuration file.
+    :rtype: :func:`list` of :func:`str`
+    '''
+    unary_metrics = [x.__class__.__name__ for x in evaluation.unary_metrics]
+    binary_metrics = [x.__class__.__name__ for x in evaluation.metrics]
+
+    return unary_metrics + binary_metrics
+
 def _extract_local_dataset_info(dataset):
     ''''''
     dataset_info = {'optional_args': {}}
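
----------------------------------------------------------------------

Usage sketch (not part of the commit): generate_metric_information()
only reads the 'unary_metrics' and 'metrics' (binary) attributes of an
Evaluation and returns the metric class names as strings, unary metrics
first. The StubEvaluation class below is hypothetical and stands in for
a real ocw.evaluation.Evaluation; the metric class names mirror those
in ocw.metrics. The import assumes the script is run from the
ocw-config-runner directory.

from configuration_writer import generate_metric_information

class Bias(object):
    '''Stand-in for the binary metric ocw.metrics.Bias.'''

class TemporalStdDev(object):
    '''Stand-in for the unary metric ocw.metrics.TemporalStdDev.'''

class StubEvaluation(object):
    '''Hypothetical stub exposing the two attributes the function reads.'''
    def __init__(self, metrics, unary_metrics):
        self.metrics = metrics              # binary metrics
        self.unary_metrics = unary_metrics  # unary metrics

evaluation = StubEvaluation(metrics=[Bias()],
                            unary_metrics=[TemporalStdDev()])

print(generate_metric_information(evaluation))
# ['TemporalStdDev', 'Bias']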