Posted to commits@climate.apache.org by jo...@apache.org on 2015/06/05 17:50:09 UTC

[02/10] climate git commit: CLIMATE-583 - Allow for partial evaluation runs

CLIMATE-583 - Allow for partial evaluation runs

Allow a user to do partial evaluation runs by specifying that config
parsing errors should be ignored. For instance, suppose that a user
wants to generate only a subregion plot. Normally they would have to
load and run a fake evaluation just to get the subregion plot. With this
new optional argument the user can pass only subregion and plot
information in the config and still run the evaluation.
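
For illustration, here is a minimal sketch of what such a "partial"
config might look like. Only the 'datasets' and 'metrics' keys (which
may now be empty) come from this commit; the 'subregions' bounds and
the 'plots' section below are hypothetical placeholders, not the
documented schema:

    import yaml

    # Sketch only: 'datasets' and 'metrics' parse to None, which the
    # new guards in generate_evaluation_from_config tolerate. The
    # 'subregions' entry and 'plots' section are placeholders; check
    # the project wiki for the real schema.
    partial_config = yaml.safe_load("""
    datasets:
    metrics:
    subregions:
        - [-10.0, 10.0, 25.0, 35.0]
    plots:
        - type: subregion
          output_name: subregion_plot
    """)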

Note: there are no guarantees that any partial evaluation will work.
This option gives the user plenty of rope to hang themselves and should
really only be used to generate plots that don't require a full
evaluation run (such as subregion plots).

- Add an optional command line arg that tells the parser/runner to ignore
  config parsing errors. This will cause the evaluation to "run" even if
  the configuration isn't valid (see the usage sketch after this list).
- When running an evaluation, only run if the evaluation object passes
  an is_valid check. Normally this is done in the evaluation object
  anyway when calling run, but calling it early prevents error messages
  from being dumped.
- Make evaluation creation a bit more robust. Instead of just assuming
  that 'datasets' is always present, we first check that the value is
  not None in the config before trying to check for 'reference' or
  'targets'. The same applies to 'metrics'. This is necessary for
  "partial" evaluations.
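
A sketch of driving a partial run from Python, using the new
ignore_config_errors parameter added in this commit (the config file
name is a placeholder, and ocw_evaluation_from_config is assumed to be
importable from the ocw-config-runner directory):

    from ocw_evaluation_from_config import run_evaluation_from_config

    # Config validation is skipped; the partial evaluation is built but
    # only run if it passes the is_valid check, and plotting proceeds
    # either way.
    run_evaluation_from_config('subregion_only.yaml',
                               ignore_config_errors=True)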


Project: http://git-wip-us.apache.org/repos/asf/climate/repo
Commit: http://git-wip-us.apache.org/repos/asf/climate/commit/ba855628
Tree: http://git-wip-us.apache.org/repos/asf/climate/tree/ba855628
Diff: http://git-wip-us.apache.org/repos/asf/climate/diff/ba855628

Branch: refs/heads/master
Commit: ba8556287b3558eb6554a32596976c6a9ae98b11
Parents: f316aef
Author: Michael Joyce <jo...@apache.org>
Authored: Mon Jun 1 15:09:18 2015 -0700
Committer: Michael Joyce <jo...@apache.org>
Committed: Thu Jun 4 14:44:13 2015 -0700

----------------------------------------------------------------------
 ocw-config-runner/evaluation_creation.py        | 21 +++++++++++---------
 ocw-config-runner/ocw_evaluation_from_config.py | 19 ++++++++++++++----
 2 files changed, 27 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/climate/blob/ba855628/ocw-config-runner/evaluation_creation.py
----------------------------------------------------------------------
diff --git a/ocw-config-runner/evaluation_creation.py b/ocw-config-runner/evaluation_creation.py
index d5bc6d0..88394de 100644
--- a/ocw-config-runner/evaluation_creation.py
+++ b/ocw-config-runner/evaluation_creation.py
@@ -45,18 +45,21 @@ def generate_evaluation_from_config(config_data):
     """
     # Load datasets
     reference = None
-    targets = None
-    if 'reference' in config_data['datasets']:
-        reference = _load_dataset(config_data['datasets']['reference'])
+    targets = []
+    if config_data['datasets']:
+        if 'reference' in config_data['datasets']:
+            reference = _load_dataset(config_data['datasets']['reference'])
 
-    if 'targets' in config_data['datasets']:
-        targets = [_load_dataset(t) for t in config_data['datasets']['targets']]
+        if 'targets' in config_data['datasets']:
+            targets = [_load_dataset(t) for t in config_data['datasets']['targets']]
 
-    reference, targets = _prepare_datasets_for_evaluation(reference,
-                                                          targets,
-                                                          config_data)
+        reference, targets = _prepare_datasets_for_evaluation(reference,
+                                                              targets,
+                                                              config_data)
     # Load metrics
-    eval_metrics = [_load_metric(m)() for m in config_data['metrics']]
+    eval_metrics = []
+    if config_data['metrics']:
+        eval_metrics = [_load_metric(m)() for m in config_data['metrics']]
 
     # Load Subregions (if present)
     subregions = None

http://git-wip-us.apache.org/repos/asf/climate/blob/ba855628/ocw-config-runner/ocw_evaluation_from_config.py
----------------------------------------------------------------------
diff --git a/ocw-config-runner/ocw_evaluation_from_config.py b/ocw-config-runner/ocw_evaluation_from_config.py
index e27acc2..93b411a 100644
--- a/ocw-config-runner/ocw_evaluation_from_config.py
+++ b/ocw-config-runner/ocw_evaluation_from_config.py
@@ -27,7 +27,7 @@ import yaml
 logging.basicConfig()
 logger = logging.getLogger(__name__)
 
-def run_evaluation_from_config(config_file_path):
+def run_evaluation_from_config(config_file_path, ignore_config_errors=False):
     """ Run an OCW evaluation specified by a config file.
 
     :param config_file_path: The file path to a OCW compliant YAML file
@@ -35,18 +35,28 @@ def run_evaluation_from_config(config_file_path):
         the valid options that you can set in the config please check the
         project wiki https://cwiki.apache.org/confluence/display/climate/home#'.
     :type config_file_path: :mod:`string`
+
+    :param ignore_config_errors: When this is true configuration parsing errors
+        will NOT interrupt the evaluation run. Note, it is very unlikely that
+        you will want this value set. However it is possible that you will want
+        to graph something that doesn't require a full evaluation run. This is
+        provided for that situation.
+    :type ignore_config_errors: :func:`bool`
     """
     config = yaml.load(open(config_file_path, 'r'))
 
-    if not is_config_valid(config):
+    if not ignore_config_errors and not is_config_valid(config):
         logger.warning(
             'Unable to validate configuration file. Exiting evaluation. '
             'Please check documentation for config information.'
         )
+
         sys.exit(1)
 
     evaluation = generate_evaluation_from_config(config)
-    evaluation.run()
+
+    if evaluation._evaluation_is_valid():
+        evaluation.run()
 
     plot_from_config(evaluation, config)
 
@@ -56,6 +66,7 @@ if __name__ == '__main__':
 
     parser = argparse.ArgumentParser(description=description, epilog=epilog)
     parser.add_argument('config', help='Path to YAML config file for the evaluation')
+    parser.add_argument('ignore_config_errors', nargs='?', default=False, type=bool)
     args = parser.parse_args()
 
-    run_evaluation_from_config(args.config)
+    run_evaluation_from_config(args.config, args.ignore_config_errors)
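
A command line usage sketch (config file names are placeholders):

    python ocw_evaluation_from_config.py my_eval.yaml
    python ocw_evaluation_from_config.py subregion_only.yaml 1

One caveat with the new positional argument: argparse's type=bool
simply calls bool() on the raw string, so any non-empty value (even
"False") enables the flag; omit the argument entirely to keep the
default of False.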