Posted to commits@climate.apache.org by go...@apache.org on 2016/11/28 22:53:49 UTC

[3/3] climate git commit: Bug fixes in run_RCMES.py

Bug fixes in run_RCMES.py
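
In brief, the diff below makes three corrections: the obs-scaling block no longer loops over `reference_data_info` (which appears to be unresolved at that point); the multiplying factor is instead popped from the first (reference) dataset's config entry and applied after the datasets are loaded. The subregion summary print now uses the defined `nsubregion` variable rather than the undefined `nsubregions`. Finally, the keyword arguments passed to dsp.write_netcdf_multiple_datasets_with_subregions are renamed from target_subregion_* to model_subregion_*, presumably to match that function's expected keywords.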


Project: http://git-wip-us.apache.org/repos/asf/climate/repo
Commit: http://git-wip-us.apache.org/repos/asf/climate/commit/7187cf30
Tree: http://git-wip-us.apache.org/repos/asf/climate/tree/7187cf30
Diff: http://git-wip-us.apache.org/repos/asf/climate/diff/7187cf30

Branch: refs/heads/master
Commit: 7187cf306877ea471335d8d22704bee7653342e9
Parents: b04d86c
Author: Alex Goodman <ag...@users.noreply.github.com>
Authored: Tue Nov 15 17:58:57 2016 -0800
Committer: Alex Goodman <ag...@users.noreply.github.com>
Committed: Tue Nov 15 17:58:57 2016 -0800

----------------------------------------------------------------------
 RCMES/run_RCMES.py | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/climate/blob/7187cf30/RCMES/run_RCMES.py
----------------------------------------------------------------------
diff --git a/RCMES/run_RCMES.py b/RCMES/run_RCMES.py
index e65a452..027d2e2 100644
--- a/RCMES/run_RCMES.py
+++ b/RCMES/run_RCMES.py
@@ -90,20 +90,23 @@ extra_opts = {'min_lat': min_lat, 'max_lat': max_lat, 'min_lon': min_lon,
 data_info = config['datasets']
 
 # Extract info we don't want to put into the loader config
-# Multiplying Factor to scale obs by
-multiplying_factor = np.ones(len(data_info))
-for i, info in enumerate(reference_data_info):
-    multiplying_factor[i] = info.pop('multiplying_factor', 1)
+# Multiplying Factor to scale obs by. Currently only supported for reference
+# (first) dataset. We should instead make this a parameter for each
+# loader and Dataset object.
+fact = data_info[0].pop('multiplying_factor', 1)
     
 """ Step 1: Load the datasets """
 print('Loading datasets:\n{}'.format(data_info))
 datasets = load_datasets_from_config(extra_opts, *data_info)
+multiplying_factor = np.ones(len(datasets))
+multiplying_factor[0] = fact
 names = [dataset.name for dataset in datasets]
 for i, dataset in enumerate(datasets):
     if temporal_resolution == 'daily' or temporal_resolution == 'monthly':
         datasets[i] = dsp.normalize_dataset_datetimes(dataset,
                                                       temporal_resolution)
-        datasets[i].values *= multiplying_factor[i]
+        if multiplying_factor[i] != 1:
+            datasets[i].values *= multiplying_factor[i]
 
 """ Step 2: Subset the data for temporal and spatial domain """
 # Create a Bounds object to use for subsetting
@@ -215,7 +218,7 @@ if config['use_subregions']:
     nsubregion = len(subregions)
 
     print('Calculating spatial averages and standard deviations of {} subregions'
-          .format(nsubregions))
+          .format(nsubregion))
 
     reference_subregion_mean, reference_subregion_std, subregion_array = (
         utils.calc_subregion_area_mean_and_std([reference_dataset], subregions))
@@ -237,8 +240,8 @@ if config['use_subregions']:
         subregions=subregions, subregion_array=subregion_array,
         ref_subregion_mean=reference_subregion_mean,
         ref_subregion_std=reference_subregion_std,
-        target_subregion_mean=target_subregion_mean,
-        target_subregion_std=target_subregion_std)
+        model_subregion_mean=target_subregion_mean,
+        model_subregion_std=target_subregion_std)
 else:
     dsp.write_netcdf_multiple_datasets_with_subregions(
                                 reference_dataset, reference_name, target_datasets,
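
For context, the corrected scaling logic can be exercised in isolation. The sketch below is a minimal stand-in, assuming a two-entry config list and dummy arrays in place of the real load_datasets_from_config() call and OCW Dataset objects; the config keys and literal values are illustrative, not taken from the repository:

----------------------------------------------------------------------
import numpy as np

# Hypothetical config entries: only the reference (first) dataset
# carries a 'multiplying_factor' key (e.g. a unit-conversion factor).
data_info = [
    {'data_source': 'local', 'multiplying_factor': 86400},  # reference obs
    {'data_source': 'local'},                               # model
]

# Pop the factor so it is not forwarded to the loader, as in the fix.
fact = data_info[0].pop('multiplying_factor', 1)

# In run_RCMES.py the datasets come from:
#   datasets = load_datasets_from_config(extra_opts, *data_info)
# Here, dummy arrays stand in for each dataset's .values attribute.
values = [np.ones((2, 2)), np.ones((2, 2))]

# Per-dataset factor array; only the reference entry differs from 1.
multiplying_factor = np.ones(len(values))
multiplying_factor[0] = fact

for i in range(len(values)):
    if multiplying_factor[i] != 1:  # skip the no-op multiply
        values[i] = values[i] * multiplying_factor[i]

print(values[0][0, 0], values[1][0, 0])  # -> 86400.0 1.0
----------------------------------------------------------------------

Note that in the script itself the multiply still runs inside the daily/monthly normalization branch, so the factor is only applied when temporal_resolution is 'daily' or 'monthly'.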