You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@climate.apache.org by hu...@apache.org on 2015/09/25 17:57:30 UTC

[3/4] climate git commit: CLIMATE-666 - Replace examples with the RCMES script and yaml files

CLIMATE-666 - Replace examples with the RCMES script and yaml files

- run_RCMES.py, example_package.py and .yaml files replace the old examples.


Project: http://git-wip-us.apache.org/repos/asf/climate/repo
Commit: http://git-wip-us.apache.org/repos/asf/climate/commit/7396ffc8
Tree: http://git-wip-us.apache.org/repos/asf/climate/tree/7396ffc8
Diff: http://git-wip-us.apache.org/repos/asf/climate/diff/7396ffc8

Branch: refs/heads/master
Commit: 7396ffc82b246c1dcad71661b4884b93ea09bb41
Parents: 82f6651
Author: huikyole <hu...@argo.jpl.nasa.gov>
Authored: Tue Sep 22 13:56:14 2015 -0700
Committer: huikyole <hu...@argo.jpl.nasa.gov>
Committed: Tue Sep 22 13:56:14 2015 -0700

----------------------------------------------------------------------
 ...ia_prec_DJF_mean_taylor_diagram_to_TRMM.yaml |  45 +++++
 ...ordex-AF_tasmax_annual_mean_bias_to_cru.yaml |  46 +++++
 ...prec_subregion_annual_cycle_time_series.yaml |  90 +++++++++
 ...cap_prec_JJA_mean_taylor_diagram_to_cru.yaml |  44 +++++
 ...nterannual_variability_portrait_diagram.yaml |  75 ++++++++
 .../old_examples/knmi_to_cru31_full_bias.py     | 174 +++++++++++++++++
 .../old_examples/model_ensemble_to_rcmed.py     | 186 +++++++++++++++++++
 examples/old_examples/multi_model_evaluation.py | 151 +++++++++++++++
 .../old_examples/multi_model_taylor_diagram.py  | 144 ++++++++++++++
 .../old_examples/simple_model_to_model_bias.py  | 124 +++++++++++++
 .../simple_model_to_model_bias_DJF_and_JJA.py   |  64 +++++++
 examples/old_examples/simple_model_tstd.py      |  89 +++++++++
 examples/old_examples/subregions.py             |  53 ++++++
 .../old_examples/subregions_portrait_diagram.py | 139 ++++++++++++++
 examples/old_examples/taylor_diagram_example.py | 113 +++++++++++
 .../old_examples/time_series_with_regions.py    | 141 ++++++++++++++
 examples/run_RCMES.py                           |   7 +-
 17 files changed, 1683 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/cmip5_SE_Asia_prec_DJF_mean_taylor_diagram_to_TRMM.yaml
----------------------------------------------------------------------
diff --git a/examples/cmip5_SE_Asia_prec_DJF_mean_taylor_diagram_to_TRMM.yaml b/examples/cmip5_SE_Asia_prec_DJF_mean_taylor_diagram_to_TRMM.yaml
new file mode 100644
index 0000000..276e744
--- /dev/null
+++ b/examples/cmip5_SE_Asia_prec_DJF_mean_taylor_diagram_to_TRMM.yaml
@@ -0,0 +1,45 @@
+workdir: ./
+output_netcdf_filename: cmip5_SE_Asia_prec_DJF_1998-2010.nc
+
+# (RCMES will temporally subset data between month_start and month_end. If average_each_year is True (False), seasonal mean in each year is (not) calculated and used for metrics calculation.)
+time:
+    maximum_overlap_period: True  
+    start_time: 1981-01-01
+    end_time: 2010-12-31
+    temporal_resolution: monthly
+    month_start: 12
+    month_end: 2
+    average_each_year: False  
+
+space:
+    min_lat: -15.14
+    max_lat: 27.26
+    min_lon: 89.26  
+    max_lon: 146.96
+
+regrid:
+    regrid_on_reference: True  
+    regrid_dlat: 0.50
+    regrid_dlon: 0.50
+
+datasets:
+    reference:
+        data_source: rcmed
+        data_name: TRMM
+        dataset_id: 3
+        parameter_id: 36
+
+    targets:
+        data_source: local
+        path: ./data/pr_Amon*                                   
+        variable: pr    
+
+number_of_metrics_and_plots: 1
+
+metrics1: Taylor_diagram_spatial_pattern_of_multiyear_climatology
+
+plots1:
+    file_name: cmip5_SE_ASIA_prec_DJF_mean_taylor_diagram_to_TRMM
+
+use_subregions: False
+

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/cordex-AF_tasmax_annual_mean_bias_to_cru.yaml
----------------------------------------------------------------------
diff --git a/examples/cordex-AF_tasmax_annual_mean_bias_to_cru.yaml b/examples/cordex-AF_tasmax_annual_mean_bias_to_cru.yaml
new file mode 100644
index 0000000..042a9a3
--- /dev/null
+++ b/examples/cordex-AF_tasmax_annual_mean_bias_to_cru.yaml
@@ -0,0 +1,46 @@
+workdir: ./
+output_netcdf_filename: cordex-AF_CRU_taxmax_monthly_1990-2007.nc
+
+# (RCMES will temporally subset data between month_start and month_end. If average_each_year is True (False), seasonal mean in each year is (not) calculated and used for metrics calculation.)
+time:
+    maximum_overlap_period: True 
+    start_time: 1990-01-01
+    end_time: 2007-12-31
+    temporal_resolution: monthly
+    month_start: 1
+    month_end: 12
+    average_each_year: False
+
+space:
+    min_lat: -45.76
+    max_lat: 42.24
+    min_lon: -24.64
+    max_lon: 60.28
+
+regrid:
+    regrid_on_reference: False
+    regrid_dlat: 0.44
+    regrid_dlon: 0.44
+
+datasets:
+    reference:
+        data_source: rcmed
+        data_name: CRU
+        dataset_id: 10
+        parameter_id: 37
+
+    targets:
+        data_source: local
+        path: ./data/AFRICA*tasmax.nc                                                    
+        variable: tasmax  
+
+number_of_metrics_and_plots: 1
+
+metrics1: Map_plot_bias_of_multiyear_climatology
+
+plots1:
+    file_name: cordex-AF_tasmax_annual_mean_bias_to_cru
+    subplots_array: !!python/tuple [3,4] 
+
+use_subregions: False
+

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/cordex_AF_prec_subregion_annual_cycle_time_series.yaml
----------------------------------------------------------------------
diff --git a/examples/cordex_AF_prec_subregion_annual_cycle_time_series.yaml b/examples/cordex_AF_prec_subregion_annual_cycle_time_series.yaml
new file mode 100644
index 0000000..9483cae
--- /dev/null
+++ b/examples/cordex_AF_prec_subregion_annual_cycle_time_series.yaml
@@ -0,0 +1,90 @@
+workdir: ./
+output_netcdf_filename: cordex_AF_prec_monthly_mean_1990-2007.nc
+
+# (RCMES will temporally subset data between month_start and month_end. If average_each_year is True (False), seasonal mean in each year is (not) calculated and used for metrics calculation.)
+time:
+    maximum_overlap_period: True 
+    start_time: 1998-01-01
+    end_time: 2007-12-31
+    temporal_resolution: monthly
+    month_start: 1
+    month_end: 12
+    average_each_year: False
+
+space:
+    min_lat: -45.76
+    max_lat: 42.24
+    min_lon: -24.64
+    max_lon: 60.28
+
+regrid:
+    regrid_on_reference: False
+    regrid_dlat: 0.44
+    regrid_dlon: 0.44
+
+datasets:
+    reference:
+        data_source: rcmed
+        data_name: CRU  
+        dataset_id: 10
+        parameter_id: 37
+
+    targets:
+        data_source: local
+        path: ./data/AFRICA*pr.nc                                
+        variable: pr    
+
+number_of_metrics_and_plots: 1
+
+metrics1: Timeseries_plot_subregion_annual_cycle
+
+plots1:
+    file_name: cordex_AF_prec_subregion_annual_cycle_time_series
+    subplots_array: !!python/tuple [7,3]
+
+use_subregions: True 
+
+subregions:
+#subregion name (R01, R02, R03,....) followed by an array of boundaries [south, north, west, east]
+    R01:
+      [29.0, 36.5, -10.0, 0.0]
+    R02:
+      [29, 37.5, 0, 10]
+    R03:
+      [25, 32.5, 10, 20]
+    R04:
+      [25, 32.5, 20, 33]
+    R05:
+      [12, 20.0, -19.3, -10.2]
+    R06:
+      [15, 25.0, 15, 30]
+    R07:
+      [7.3, 15,  -10, 10]
+    R08:
+      [5, 7.3,  -10, 10]
+    R09:
+      [6.9, 15, 33.9, 40]
+    R10:
+      [2.2, 11.8, 44.2, 51.8]
+    R11:
+      [0, 10, 10, 25]
+    R12:
+      [-10, 0, 10, 25]
+    R13:
+      [-15, 0, 30, 40]
+    R14:
+      [-27.9, -21.4, 13.6, 20]
+    R15:
+      [-35, -27.9, 13.6, 20]
+    R16:
+      [-35, -21.4, 20, 35.7]
+    R17:
+      [-25.8, -11.7, 43.2, 50.3]
+    R18:
+      [25, 35.0, 33, 40]
+    R19:
+      [28, 35, 45, 50]
+    R20:
+      [13, 20.0, 43, 50]
+    R21:
+      [20, 27.5, 50, 58]

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/narccap_prec_JJA_mean_taylor_diagram_to_cru.yaml
----------------------------------------------------------------------
diff --git a/examples/narccap_prec_JJA_mean_taylor_diagram_to_cru.yaml b/examples/narccap_prec_JJA_mean_taylor_diagram_to_cru.yaml
new file mode 100644
index 0000000..c6b96cf
--- /dev/null
+++ b/examples/narccap_prec_JJA_mean_taylor_diagram_to_cru.yaml
@@ -0,0 +1,44 @@
+workdir: ./                                      
+output_netcdf_filename: narccap_prec_JJA_1980-2003.nc
+
+# (RCMES will temporally subset data between month_start and month_end. If average_each_year is True (False), seasonal mean in each year is (not) calculated and used for metrics calculation.)
+time:
+    maximum_overlap_period: False
+    start_time: 1980-01-01
+    end_time: 2003-12-31
+    temporal_resolution: monthly
+    month_start: 6
+    month_end: 8
+    average_each_year: True  
+
+space:
+    min_lat: 23.75
+    max_lat: 49.75
+    min_lon: -125.75
+    max_lon: -66.75
+
+regrid:
+    regrid_on_reference: False
+    regrid_dlat: 0.50
+    regrid_dlon: 0.50
+
+datasets:
+    reference:
+        data_source: rcmed
+        data_name: CRU
+        dataset_id: 10
+        parameter_id: 37
+
+    targets:
+        data_source: local
+        path: ./data/prec.*ncep.monavg.nc                                                    
+        variable: prec    
+
+number_of_metrics_and_plots: 1
+
+metrics1: Taylor_diagram_spatial_pattern_of_multiyear_climatology
+
+plots1:
+    file_name: narccap_prec_JJA_mean_taylor_diagram_to_cru
+
+use_subregions: False

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/narccap_tas_DJF_subregion_interannual_variability_portrait_diagram.yaml
----------------------------------------------------------------------
diff --git a/examples/narccap_tas_DJF_subregion_interannual_variability_portrait_diagram.yaml b/examples/narccap_tas_DJF_subregion_interannual_variability_portrait_diagram.yaml
new file mode 100644
index 0000000..de2d98e
--- /dev/null
+++ b/examples/narccap_tas_DJF_subregion_interannual_variability_portrait_diagram.yaml
@@ -0,0 +1,75 @@
+workdir: ./
+output_netcdf_filename: narccap_tas_DJF_mean_mean_1980-2003.nc
+
+# (RCMES will temporally subset data between month_start and month_end. If average_each_year is True (False), seasonal mean in each year is (not) calculated and used for metrics calculation.)
+time:
+    maximum_overlap_period: False
+    start_time: 1980-01-01
+    end_time: 2003-12-31
+    temporal_resolution: monthly
+    month_start: 12
+    month_end: 2
+    average_each_year: True
+
+space:
+    min_lat: 23.75
+    max_lat: 49.75
+    min_lon: -125.75
+    max_lon: -66.75
+
+regrid:
+    regrid_on_reference: False
+    regrid_dlat: 0.50
+    regrid_dlon: 0.50
+
+datasets:
+    reference:
+        data_source: rcmed
+        data_name: CRU
+        dataset_id: 10
+        parameter_id: 38
+
+    targets:
+        data_source: local
+        path: ./data/temp*ncep.monavg.nc                                                    
+        variable: temp    
+
+number_of_metrics_and_plots: 1
+
+metrics1: Portrait_diagram_subregion_interannual_variability
+
+plots1:
+    file_name: narccap_tas_DJF_subregion_interannual_variability_portrait_diagram
+
+use_subregions: True 
+
+subregions:
+#subregion name (R01, R02, R03,....) followed by an array of boundaries [south, north, west, east]
+    R01: 
+      [42.75, 49.75, -123.75, -120.25]
+    R02:
+      [42.75, 49.75, -119.75, -112.75]
+    R03:
+      [37.25, 42.25, -123.75, -117.75]
+    R04: 
+      [32.25, 37.25, -122.75, -114.75]
+    R05:
+      [31.25, 37.25, -113.75, -108.25]
+    R06:
+      [31.25, 37.25, -108.25, -99.75]
+    R07:
+      [37.25, 43.25, -110.25, -103.75]
+    R08: 
+      [45.25, 49.25, -99.75, -90.25]
+    R09: 
+      [34.75, 45.25, -99.75, -90.25]
+    R10: 
+      [29.75, 34.75, -95.75, -84.75]
+    R11: 
+      [38.25, 44.75, -89.75, -80.25]
+    R12: 
+      [38.25, 44.75, -79.75, -70.25]
+    R13: 
+      [30.75, 38.25, -83.75, -75.25]
+    R14: 
+      [24.25, 30.75, -83.75, -80.25]

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/old_examples/knmi_to_cru31_full_bias.py
----------------------------------------------------------------------
diff --git a/examples/old_examples/knmi_to_cru31_full_bias.py b/examples/old_examples/knmi_to_cru31_full_bias.py
new file mode 100644
index 0000000..a241442
--- /dev/null
+++ b/examples/old_examples/knmi_to_cru31_full_bias.py
@@ -0,0 +1,174 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import urllib
+from os import path
+
+import numpy as np
+
+import ocw.data_source.local as local
+import ocw.data_source.rcmed as rcmed
+from ocw.dataset import Bounds as Bounds
+import ocw.dataset_processor as dsp
+import ocw.evaluation as evaluation
+import ocw.metrics as metrics
+import ocw.plotter as plotter
+# File URL leader
+FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
+# This way we can easily adjust the time span of the retrievals
+YEARS = 3
+# Two Local Model Files 
+MODEL = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
+# Filename for the output image/plot (without file extension)
+OUTPUT_PLOT = "cru_31_tmax_knmi_africa_bias_full"
+
+# Download necessary NetCDF file if not present
+if path.exists(MODEL):
+    pass
+else:
+    urllib.urlretrieve(FILE_LEADER + MODEL, MODEL)
+
+""" Step 1: Load Local NetCDF File into OCW Dataset Objects """
+print("Loading %s into an OCW Dataset Object" % (MODEL,))
+knmi_dataset = local.load_file(MODEL, "tasmax")
+print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" % (knmi_dataset.values.shape,))
+
+""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
+print("Working with the rcmed interface to get CRU3.1 Daily-Max Temp")
+metadata = rcmed.get_parameters_metadata()
+
+cru_31 = [m for m in metadata if m['parameter_id'] == "39"][0]
+
+""" The RCMED API uses the following function to query, subset and return the 
+raw data from the database:
+
+rcmed.parameter_dataset(dataset_id, parameter_id, min_lat, max_lat, min_lon, 
+                        max_lon, start_time, end_time)
+
+The first two required params are in the cru_31 variable we defined earlier
+"""
+# Must cast to int since the rcmed api requires ints
+dataset_id = int(cru_31['dataset_id'])
+parameter_id = int(cru_31['parameter_id'])
+
+print("We are going to use the Model to constrain the Spatial Domain")
+#  The spatial_boundaries() function returns the spatial extent of the dataset
+print("The KNMI_Dataset spatial bounds (min_lat, max_lat, min_lon, max_lon) are: \n"
+      "%s\n" % (knmi_dataset.spatial_boundaries(), ))
+print("The KNMI_Dataset spatial resolution (lat_resolution, lon_resolution) is: \n"
+      "%s\n\n" % (knmi_dataset.spatial_resolution(), ))
+min_lat, max_lat, min_lon, max_lon = knmi_dataset.spatial_boundaries()
+
+print("Calculating the Maximum Overlap in Time for the datasets")
+
+cru_start = datetime.datetime.strptime(cru_31['start_date'], "%Y-%m-%d")
+cru_end = datetime.datetime.strptime(cru_31['end_date'], "%Y-%m-%d")
+knmi_start, knmi_end = knmi_dataset.time_range()
+# Grab the Max Start Time
+start_time = max([cru_start, knmi_start])
+# Grab the Min End Time
+end_time = min([cru_end, knmi_end])
+print("Overlap computed to be: %s to %s" % (start_time.strftime("%Y-%m-%d"),
+                                          end_time.strftime("%Y-%m-%d")))
+print("We are going to grab the first %s year(s) of data" % YEARS)
+end_time = datetime.datetime(start_time.year + YEARS, start_time.month, start_time.day)
+print("Final Overlap is: %s to %s" % (start_time.strftime("%Y-%m-%d"),
+                                          end_time.strftime("%Y-%m-%d")))
+
+print("Fetching data from RCMED...")
+cru31_dataset = rcmed.parameter_dataset(dataset_id,
+                                        parameter_id,
+                                        min_lat,
+                                        max_lat,
+                                        min_lon,
+                                        max_lon,
+                                        start_time,
+                                        end_time)
+
+""" Step 3: Resample Datasets so they are the same shape """
+print("CRU31_Dataset.values shape: (times, lats, lons) - %s" % (cru31_dataset.values.shape,))
+print("KNMI_Dataset.values shape: (times, lats, lons) - %s" % (knmi_dataset.values.shape,))
+print("Our two datasets have a mis-match in time. We will subset on time to %s years\n" % YEARS)
+
+# Create a Bounds object to use for subsetting
+new_bounds = Bounds(min_lat, max_lat, min_lon, max_lon, start_time, end_time)
+knmi_dataset = dsp.subset(new_bounds, knmi_dataset)
+
+print("CRU31_Dataset.values shape: (times, lats, lons) - %s" % (cru31_dataset.values.shape,))
+print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" % (knmi_dataset.values.shape,))
+
+print("Temporally Rebinning the Datasets to a Single Timestep")
+# To run FULL temporal Rebinning use a timedelta > 366 days.  I used 999 in this example
+knmi_dataset = dsp.temporal_rebin(knmi_dataset, datetime.timedelta(days=999))
+cru31_dataset = dsp.temporal_rebin(cru31_dataset, datetime.timedelta(days=999))
+
+print("KNMI_Dataset.values shape: %s" % (knmi_dataset.values.shape,))
+print("CRU31_Dataset.values shape: %s \n\n" % (cru31_dataset.values.shape,))
+ 
+""" Spatially Regrid the Dataset Objects to a 1/2 degree grid """
+# Using the bounds we will create a new set of lats and lons on 0.5 degree step
+new_lons = np.arange(min_lon, max_lon, 0.5)
+new_lats = np.arange(min_lat, max_lat, 0.5)
+ 
+# Spatially regrid datasets using the new_lats, new_lons numpy arrays
+print("Spatially Regridding the KNMI_Dataset...")
+knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
+print("Spatially Regridding the CRU31_Dataset...")
+cru31_dataset = dsp.spatial_regrid(cru31_dataset, new_lats, new_lons)
+print("Final shape of the KNMI_Dataset:%s" % (knmi_dataset.values.shape, ))
+print("Final shape of the CRU31_Dataset:%s" % (cru31_dataset.values.shape, ))
+ 
+""" Step 4:  Build a Metric to use for Evaluation - Bias for this example """
+# You can build your own metrics, but OCW also ships with some common metrics
+print("Setting up a Bias metric to use for evaluation")
+bias = metrics.Bias()
+
+""" Step 5: Create an Evaluation Object using Datasets and our Metric """
+# The Evaluation Class Signature is:
+# Evaluation(reference, targets, metrics, subregions=None)
+# Evaluation can take in multiple targets and metrics, so we need to convert
+# our examples into Python lists.  Evaluation will iterate over the lists
+print("Making the Evaluation definition")
+bias_evaluation = evaluation.Evaluation(knmi_dataset, [cru31_dataset], [bias])
+print("Executing the Evaluation using the object's run() method")
+bias_evaluation.run()
+ 
+""" Step 6: Make a Plot from the Evaluation.results """
+# The Evaluation.results are a set of nested lists to support many different
+# possible Evaluation scenarios.
+#
+# The Evaluation results docs say:
+# The shape of results is (num_metrics, num_target_datasets) if no subregion
+# Accessing the actual results when we have used 1 metric and 1 dataset is
+# done this way:
+print("Accessing the Results of the Evaluation run")
+results = bias_evaluation.results[0][0]
+ 
+# From the bias output I want to make a Contour Map of the region
+print("Generating a contour map using ocw.plotter.draw_contour_map()")
+ 
+lats = new_lats
+lons = new_lons
+fname = OUTPUT_PLOT
+gridshape = (1, 1)  # Using a 1 x 1 since we have a single Bias for the full time range
+plot_title = "TASMAX Bias of KNMI Compared to CRU 3.1 (%s - %s)" % (start_time.strftime("%Y/%d/%m"), end_time.strftime("%Y/%d/%m"))
+sub_titles = ["Full Temporal Range"]
+ 
+plotter.draw_contour_map(results, lats, lons, fname,
+                         gridshape=gridshape, ptitle=plot_title, 
+                         subtitles=sub_titles)

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/old_examples/model_ensemble_to_rcmed.py
----------------------------------------------------------------------
diff --git a/examples/old_examples/model_ensemble_to_rcmed.py b/examples/old_examples/model_ensemble_to_rcmed.py
new file mode 100644
index 0000000..1f653a1
--- /dev/null
+++ b/examples/old_examples/model_ensemble_to_rcmed.py
@@ -0,0 +1,186 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import math
+import urllib
+from os import path
+
+import numpy as np
+
+import ocw.data_source.local as local
+import ocw.data_source.rcmed as rcmed
+from ocw.dataset import Bounds as Bounds
+import ocw.dataset_processor as dsp
+import ocw.evaluation as evaluation
+import ocw.metrics as metrics
+import ocw.plotter as plotter
+
+# File URL leader
+FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
+# This way we can easily adjust the time span of the retrievals
+YEARS = 1
+# Two Local Model Files 
+FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
+FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc"
+# Filename for the output image/plot (without file extension)
+OUTPUT_PLOT = "tasmax_africa_bias_annual"
+
+# Download necessary NetCDF file if not present
+if path.exists(FILE_1):
+    pass
+else:
+    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
+
+if path.exists(FILE_2):
+    pass
+else:
+    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
+
+
+""" Step 1: Load Local NetCDF File into OCW Dataset Objects """
+# Load local knmi model data
+knmi_dataset = local.load_file(FILE_1, "tasmax")
+knmi_dataset.name = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax"
+
+wrf311_dataset = local.load_file(FILE_2, "tasmax")
+wrf311_dataset.name = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax"
+
+
+
+""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
+print("Working with the rcmed interface to get CRU3.1 Daily-Max Temp")
+metadata = rcmed.get_parameters_metadata()
+
+cru_31 = [m for m in metadata if m['parameter_id'] == "39"][0]
+
+""" The RCMED API uses the following function to query, subset and return the 
+raw data from the database:
+
+rcmed.parameter_dataset(dataset_id, parameter_id, min_lat, max_lat, min_lon, 
+                        max_lon, start_time, end_time)
+
+The first two required params are in the cru_31 variable we defined earlier
+"""
+# Must cast to int since the rcmed api requires ints
+dataset_id = int(cru_31['dataset_id'])
+parameter_id = int(cru_31['parameter_id'])
+
+#  The spatial_boundaries() function returns the spatial extent of the dataset
+min_lat, max_lat, min_lon, max_lon = wrf311_dataset.spatial_boundaries()
+
+#  There is a boundary alignment issue with the datasets.  To mitigate this
+#  we will use the math.floor() and math.ceil() functions to shrink the 
+#  boundaries slightly.
+min_lat = math.ceil(min_lat)
+max_lat = math.floor(max_lat)
+min_lon = math.ceil(min_lon)
+max_lon = math.floor(max_lon)
+
+print("Calculating the Maximum Overlap in Time for the datasets")
+
+cru_start = datetime.datetime.strptime(cru_31['start_date'], "%Y-%m-%d")
+cru_end = datetime.datetime.strptime(cru_31['end_date'], "%Y-%m-%d")
+knmi_start, knmi_end = knmi_dataset.time_range()
+# Set the Time Range to be the year 1989
+start_time = datetime.datetime(1989,1,1)
+end_time = datetime.datetime(1989,12,1)
+
+print("Time Range is: %s to %s" % (start_time.strftime("%Y-%m-%d"),
+                                          end_time.strftime("%Y-%m-%d")))
+
+print("Fetching data from RCMED...")
+cru31_dataset = rcmed.parameter_dataset(dataset_id,
+                                        parameter_id,
+                                        min_lat,
+                                        max_lat,
+                                        min_lon,
+                                        max_lon,
+                                        start_time,
+                                        end_time)
+
+""" Step 3: Resample Datasets so they are the same shape """
+
+print("Temporally Rebinning the Datasets to an Annual Timestep")
+# To run annual temporal Rebinning use a timedelta of 360 days.
+knmi_dataset = dsp.temporal_rebin(knmi_dataset, datetime.timedelta(days=360))
+wrf311_dataset = dsp.temporal_rebin(wrf311_dataset, datetime.timedelta(days=360))
+cru31_dataset = dsp.temporal_rebin(cru31_dataset, datetime.timedelta(days=360))
+
+# Running Temporal Rebin early helps negate the issue of datasets being on different 
+# days of the month (1st vs. 15th)
+# Create a Bounds object to use for subsetting
+new_bounds = Bounds(min_lat, max_lat, min_lon, max_lon, start_time, end_time)
+
+# Subset our model datasets so they are the same size
+knmi_dataset = dsp.subset(new_bounds, knmi_dataset)
+wrf311_dataset = dsp.subset(new_bounds, wrf311_dataset)
+
+""" Spatially Regrid the Dataset Objects to a 1/2 degree grid """
+# Using the bounds we will create a new set of lats and lons on 1/2 degree step
+new_lons = np.arange(min_lon, max_lon, 0.5)
+new_lats = np.arange(min_lat, max_lat, 0.5)
+ 
+# Spatially regrid datasets using the new_lats, new_lons numpy arrays
+knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
+wrf311_dataset = dsp.spatial_regrid(wrf311_dataset, new_lats, new_lons)
+cru31_dataset = dsp.spatial_regrid(cru31_dataset, new_lats, new_lons)
+
+# Generate an ensemble dataset from knmi and wrf models
+ensemble_dataset = dsp.ensemble([knmi_dataset, wrf311_dataset])
+
+""" Step 4:  Build a Metric to use for Evaluation - Bias for this example """
+print("Setting up a Bias metric to use for evaluation")
+bias = metrics.Bias()
+
+""" Step 5: Create an Evaluation Object using Datasets and our Metric """
+# The Evaluation Class Signature is:
+# Evaluation(reference, targets, metrics, subregions=None)
+# Evaluation can take in multiple targets and metrics, so we need to convert
+# our examples into Python lists.  Evaluation will iterate over the lists
+print("Making the Evaluation definition")
+bias_evaluation = evaluation.Evaluation(cru31_dataset, 
+                      [knmi_dataset, wrf311_dataset, ensemble_dataset],
+                      [bias])
+print("Executing the Evaluation using the object's run() method")
+bias_evaluation.run()
+ 
+""" Step 6: Make a Plot from the Evaluation.results """
+# The Evaluation.results are a set of nested lists to support many different
+# possible Evaluation scenarios.
+#
+# The Evaluation results docs say:
+# The shape of results is (num_target_datasets, num_metrics) if no subregion
+# Accessing the actual results when we have used 3 datasets and 1 metric is
+# done this way:
+print("Accessing the Results of the Evaluation run")
+results = bias_evaluation.results
+ 
+# From the bias output I want to make a Contour Map of the region
+print("Generating a contour map using ocw.plotter.draw_contour_map()")
+ 
+lats = new_lats
+lons = new_lons
+fname = OUTPUT_PLOT
+gridshape = (3, 1)  # Using a 3 x 1 since we have a 1 year of data for 3 models
+plotnames = ["KNMI", "WRF311", "ENSEMBLE"]
+for i, result in enumerate(results):
+  plot_title = "TASMAX Bias of CRU 3.1 vs. %s (%s - %s)" % (plotnames[i], start_time.strftime("%Y/%d/%m"), end_time.strftime("%Y/%d/%m"))
+  output_file = "%s_%s" % (fname, plotnames[i].lower())
+  print "creating %s" % (output_file,)
+  plotter.draw_contour_map(result[0], lats, lons, output_file,
+                         gridshape=gridshape, ptitle=plot_title)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/old_examples/multi_model_evaluation.py
----------------------------------------------------------------------
diff --git a/examples/old_examples/multi_model_evaluation.py b/examples/old_examples/multi_model_evaluation.py
new file mode 100644
index 0000000..8136001
--- /dev/null
+++ b/examples/old_examples/multi_model_evaluation.py
@@ -0,0 +1,151 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""
Multi-model evaluation example.

Compares three African regional climate model precipitation runs (plus
their ensemble mean) against CRU3.1 observations fetched from RCMED,
and draws one annual-mean bias map per model.
"""

import datetime
import urllib  # was missing: urllib.urlretrieve() is called below
from os import path

import numpy as np

# import Apache OCW dependences
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
from ocw.dataset import Bounds as Bounds
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils
import ssl

# Work around servers whose SSL certificates cannot be verified.
if hasattr(ssl, '_create_unverified_context'):
    ssl._create_default_https_context = ssl._create_unverified_context

# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Three Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "pr_africa_bias_annual"
# variable that we are analyzing
varName = 'pr'
# Spatial and temporal configurations
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
START = datetime.datetime(2000, 1, 1)
END = datetime.datetime(2007, 12, 31)
EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)

# regridding parameters
gridLonStep = 0.5
gridLatStep = 0.5

# list for all target_datasets
target_datasets = []
# list for names for all the datasets
allNames = []

# Download each NetCDF file if it is not already present locally.
for data_file in (FILE_1, FILE_2, FILE_3):
    if not path.exists(data_file):
        urllib.urlretrieve(FILE_LEADER + data_file, data_file)

""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
target_datasets.append(local.load_file(FILE_2, varName, name="UC"))
target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))

""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
print("Working with the rcmed interface to get CRU3.1 Daily Precipitation")
# the dataset_id and the parameter id were determined from
# https://rcmes.jpl.nasa.gov/content/data-rcmes-database
CRU31 = rcmed.parameter_dataset(10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)

""" Step 3: Resample Datasets so they are the same shape """
print("Resampling datasets")
CRU31 = dsp.water_flux_unit_conversion(CRU31)
CRU31 = dsp.temporal_rebin(CRU31, datetime.timedelta(days=30))

# Subset to the evaluation bounds, convert units and rebin each model.
target_datasets = [dsp.subset(EVAL_BOUNDS, ds) for ds in target_datasets]
target_datasets = [dsp.water_flux_unit_conversion(ds) for ds in target_datasets]
target_datasets = [dsp.temporal_rebin(ds, datetime.timedelta(days=30))
                   for ds in target_datasets]

""" Spatially Regrid the Dataset Objects to a user defined grid """
# Using the bounds we will create a new set of lats and lons
print("Regridding datasets")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
target_datasets = [dsp.spatial_regrid(ds, new_lats, new_lons)
                   for ds in target_datasets]

# make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name = "ENS"

# append to the target_datasets for final analysis
target_datasets.append(target_datasets_ensemble)

# Collapse every dataset to its annual mean (second return value of
# utils.calc_climatology_year) and restore a leading singleton time
# axis so the metric still sees (time, lat, lon) shaped arrays.
_, CRU31.values = utils.calc_climatology_year(CRU31)
CRU31.values = np.expand_dims(CRU31.values, axis=0)

for ds in target_datasets:
    _, ds.values = utils.calc_climatology_year(ds)
    ds.values = np.expand_dims(ds.values, axis=0)

allNames = [ds.name for ds in target_datasets]

# determine the metrics
mean_bias = metrics.Bias()

# create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(
    CRU31,             # Reference dataset for the evaluation
    target_datasets,   # list of target datasets for the evaluation
    [mean_bias])       # 1 or more metrics to use in the evaluation
RCMs_to_CRU_evaluation.run()

# results is nested per target dataset and per metric; with a single
# metric, squeezing drops the metric dimension and leaves one bias map
# per target dataset.  (The original `results[:][0]` was a no-op copy
# followed by `[0]` -- i.e. just the first element -- and was unused.)
new_rcm_bias = np.squeeze(np.array(RCMs_to_CRU_evaluation.results))

plotter.draw_contour_map(new_rcm_bias, new_lats, new_lons, gridshape=(2, 5),
                         fname=OUTPUT_PLOT, subtitles=allNames,
                         cmap='coolwarm_r')

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/old_examples/multi_model_taylor_diagram.py
----------------------------------------------------------------------
diff --git a/examples/old_examples/multi_model_taylor_diagram.py b/examples/old_examples/multi_model_taylor_diagram.py
new file mode 100644
index 0000000..f91ab3e
--- /dev/null
+++ b/examples/old_examples/multi_model_taylor_diagram.py
@@ -0,0 +1,144 @@
# Apache OCW lib imports
from ocw.dataset import Dataset, Bounds
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils

import datetime
import urllib  # was missing: urllib.urlretrieve() is called below

import numpy as np

from os import path

# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Three Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
FILE_2 = "AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "pr_africa_taylor"

# Spatial and temporal configurations
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
# Plain integers: a zero-padded literal such as `01` is a SyntaxError
# under Python 3.
START = datetime.datetime(2000, 1, 1)
END = datetime.datetime(2007, 12, 31)
EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)

# variable that we are analyzing
varName = 'pr'

# regridding parameters
gridLonStep = 0.5
gridLatStep = 0.5

# some vars for this evaluation
target_datasets_ensemble = []
target_datasets = []
ref_datasets = []

# Download each NetCDF file if it is not already present locally.
for data_file in (FILE_1, FILE_2, FILE_3):
    if not path.exists(data_file):
        urllib.urlretrieve(FILE_LEADER + data_file, data_file)

""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
target_datasets.append(local.load_file(FILE_2, varName, name="REGM3"))
target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))

""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
print("Working with the rcmed interface to get CRU3.1 Daily Precipitation")
# the dataset_id and the parameter id were determined from
# https://rcmes.jpl.nasa.gov/content/data-rcmes-database
CRU31 = rcmed.parameter_dataset(10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)

""" Step 3: Resample Datasets so they are the same shape """
print("Resampling datasets ...")
print("... on units")
CRU31 = dsp.water_flux_unit_conversion(CRU31)
print("... temporal")
CRU31 = dsp.temporal_rebin(CRU31, datetime.timedelta(days=30))

# Same pipeline for the models: unit conversion, rebin, then subset.
target_datasets = [dsp.water_flux_unit_conversion(ds) for ds in target_datasets]
target_datasets = [dsp.temporal_rebin(ds, datetime.timedelta(days=30))
                   for ds in target_datasets]
target_datasets = [dsp.subset(EVAL_BOUNDS, ds) for ds in target_datasets]

# Regrid
print("... regrid")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
target_datasets = [dsp.spatial_regrid(ds, new_lats, new_lons)
                   for ds in target_datasets]

# Collapse the reference and restore a leading singleton time axis.
# NOTE(review): this keeps the FIRST return value of
# utils.calc_climatology_year, whereas multi_model_evaluation.py keeps
# the second -- confirm which field (annual cycle vs. annual mean) is
# intended here.
CRU31.values, _ = utils.calc_climatology_year(CRU31)
CRU31.values = np.expand_dims(CRU31.values, axis=0)

# make the model ensemble (built before the targets are collapsed)
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name = "ENS"

# append to the target_datasets for final analysis
target_datasets.append(target_datasets_ensemble)

for ds in target_datasets:
    ds.values, _ = utils.calc_climatology_year(ds)
    ds.values = np.expand_dims(ds.values, axis=0)

allNames = [ds.name for ds in target_datasets]

# calculate the metrics
pattern_correlation = metrics.PatternCorrelation()
spatial_std_dev = metrics.StdDevRatio()

# create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(
    CRU31,            # Reference dataset for the evaluation
    target_datasets,  # 1 or more target datasets for the evaluation
    [spatial_std_dev, pattern_correlation])  # metrics, in this order
RCMs_to_CRU_evaluation.run()

# Per target dataset: element 0 is the std-dev ratio, element 1 the
# pattern correlation (the order the metrics were passed in).
rcm_std_dev = [result[0] for result in RCMs_to_CRU_evaluation.results]
rcm_pat_cor = [result[1] for result in RCMs_to_CRU_evaluation.results]

taylor_data = np.array([rcm_std_dev, rcm_pat_cor]).transpose()
new_taylor_data = np.squeeze(np.array(taylor_data))

plotter.draw_taylor_diagram(new_taylor_data,
                            allNames,
                            "CRU31",
                            fname=OUTPUT_PLOT,
                            fmt='png',
                            frameon=False)

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/old_examples/simple_model_to_model_bias.py
----------------------------------------------------------------------
diff --git a/examples/old_examples/simple_model_to_model_bias.py b/examples/old_examples/simple_model_to_model_bias.py
new file mode 100644
index 0000000..635e872
--- /dev/null
+++ b/examples/old_examples/simple_model_to_model_bias.py
@@ -0,0 +1,124 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""
Simple model-to-model bias example: annual-mean TASMAX bias of the WRF
model relative to the KNMI model over Africa, plotted as one contour
map per year.
"""

import datetime
from os import path
import urllib

import numpy as np

import ocw.data_source.local as local
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter

# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Two Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "wrf_bias_compared_to_knmi"

FILE_1_PATH = path.join('/tmp', FILE_1)
FILE_2_PATH = path.join('/tmp', FILE_2)

# Download the example data into /tmp on first run.
if not path.exists(FILE_1_PATH):
    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1_PATH)
if not path.exists(FILE_2_PATH):
    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2_PATH)

""" Step 1: Load Local NetCDF Files into OCW Dataset Objects """
print("Loading %s into an OCW Dataset Object" % (FILE_1_PATH,))
knmi_dataset = local.load_file(FILE_1_PATH, "tasmax")
print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" % (knmi_dataset.values.shape,))

print("Loading %s into an OCW Dataset Object" % (FILE_2_PATH,))
wrf_dataset = local.load_file(FILE_2_PATH, "tasmax")
print("WRF_Dataset.values shape: (times, lats, lons) - %s \n" % (wrf_dataset.values.shape,))

""" Step 2: Temporally Rebin the Data into an Annual Timestep """
print("Temporally Rebinning the Datasets to an Annual Timestep")
knmi_dataset = dsp.temporal_rebin(knmi_dataset, datetime.timedelta(days=365))
wrf_dataset = dsp.temporal_rebin(wrf_dataset, datetime.timedelta(days=365))
print("KNMI_Dataset.values shape: %s" % (knmi_dataset.values.shape,))
print("WRF_Dataset.values shape: %s \n\n" % (wrf_dataset.values.shape,))

""" Step 3: Spatially Regrid the Dataset Objects to a 1 degree grid """
# The spatial_boundaries() function returns the spatial extent of the dataset
print("The KNMI_Dataset spatial bounds (min_lat, max_lat, min_lon, max_lon) are: \n"
      "%s\n" % (knmi_dataset.spatial_boundaries(), ))
print("The KNMI_Dataset spatial resolution (lat_resolution, lon_resolution) is: \n"
      "%s\n\n" % (knmi_dataset.spatial_resolution(), ))

min_lat, max_lat, min_lon, max_lon = knmi_dataset.spatial_boundaries()

# Using the bounds we will create a new set of lats and lons on 1 degree step
new_lons = np.arange(min_lon, max_lon, 1)
new_lats = np.arange(min_lat, max_lat, 1)

# Spatially regrid datasets using the new_lats, new_lons numpy arrays
print("Spatially Regridding the KNMI_Dataset...")
knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
print("Final shape of the KNMI_Dataset: \n"
      "%s\n" % (knmi_dataset.values.shape, ))
print("Spatially Regridding the WRF_Dataset...")
wrf_dataset = dsp.spatial_regrid(wrf_dataset, new_lats, new_lons)
print("Final shape of the WRF_Dataset: \n"
      "%s\n" % (wrf_dataset.values.shape, ))

""" Step 4:  Build a Metric to use for Evaluation - Bias for this example """
# You can build your own metrics, but OCW also ships with some common metrics
print("Setting up a Bias metric to use for evaluation")
bias = metrics.Bias()

""" Step 5: Create an Evaluation Object using Datasets and our Metric """
# The Evaluation Class Signature is:
# Evaluation(reference, targets, metrics, subregions=None)
# Evaluation can take in multiple targets and metrics, so we need to convert
# our examples into Python lists.  Evaluation will iterate over the lists
print("Making the Evaluation definition")
bias_evaluation = evaluation.Evaluation(knmi_dataset, [wrf_dataset], [bias])
print("Executing the Evaluation using the object's run() method")
bias_evaluation.run()

""" Step 6: Make a Plot from the Evaluation.results """
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# results is nested per metric and per target dataset; with exactly one
# metric and one target dataset, results[0][0] is the bias array itself.
print("Accessing the Results of the Evaluation run")
results = bias_evaluation.results[0][0]
print("The results are of type: %s" % type(results))

# From the bias output I want to make a Contour Map of the region
print("Generating a contour map using ocw.plotter.draw_contour_map()")

lats = new_lats
lons = new_lons
fname = OUTPUT_PLOT
# 20 annual subplots (1989-2008) laid out on a 4 x 5 grid.  (The old
# comment claimed "20 rows in 1 column", which contradicted the value.)
gridshape = (4, 5)
plot_title = "TASMAX Bias of WRF Compared to KNMI (1989 - 2008)"
sub_titles = range(1989, 2009, 1)

plotter.draw_contour_map(results, lats, lons, fname,
                         gridshape=gridshape, ptitle=plot_title,
                         subtitles=sub_titles)

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/old_examples/simple_model_to_model_bias_DJF_and_JJA.py
----------------------------------------------------------------------
diff --git a/examples/old_examples/simple_model_to_model_bias_DJF_and_JJA.py b/examples/old_examples/simple_model_to_model_bias_DJF_and_JJA.py
new file mode 100644
index 0000000..364498a
--- /dev/null
+++ b/examples/old_examples/simple_model_to_model_bias_DJF_and_JJA.py
@@ -0,0 +1,64 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""
Seasonal-average example: load two African TASMAX model runs and compute
their DJF and JJA mean fields.

NOTE(review): OUTPUT_PLOT is defined and numpy/evaluation/metrics/plotter
are imported, but this example stops after computing the seasonal means;
no evaluation is run and no plot is produced.
"""

import datetime
from os import path
import urllib

import numpy as np

import ocw.data_source.local as local
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils

# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Two Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "wrf_bias_compared_to_knmi"

FILE_1_PATH = path.join('/tmp', FILE_1)
FILE_2_PATH = path.join('/tmp', FILE_2)

# Fetch each data file into /tmp unless it is already there.
for remote_name, local_path in ((FILE_1, FILE_1_PATH), (FILE_2, FILE_2_PATH)):
    if not path.exists(local_path):
        urllib.urlretrieve(FILE_LEADER + remote_name, local_path)

""" Step 1: Load Local NetCDF Files into OCW Dataset Objects """
print("Loading %s into an OCW Dataset Object" % (FILE_1_PATH,))
knmi_dataset = local.load_file(FILE_1_PATH, "tasmax")
print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" % (knmi_dataset.values.shape,))

print("Loading %s into an OCW Dataset Object" % (FILE_2_PATH,))
wrf_dataset = local.load_file(FILE_2_PATH, "tasmax")
print("WRF_Dataset.values shape: (times, lats, lons) - %s \n" % (wrf_dataset.values.shape,))

""" Step 2: Calculate seasonal average """
print("Calculate seasonal average")

# December-January-February means for both models.
knmi_djf = dsp.temporal_subset(month_start=12, month_end=2,
                               target_dataset=knmi_dataset)
knmi_DJF_mean = utils.calc_temporal_mean(knmi_djf)
wrf_djf = dsp.temporal_subset(month_start=12, month_end=2,
                              target_dataset=wrf_dataset)
wrf_DJF_mean = utils.calc_temporal_mean(wrf_djf)

# NOTE(review): the "(times, lats, lons)" label below looks copied from
# Step 1; a temporal mean presumably drops the time axis -- confirm.
print("Seasonally averaged KNMI_Dataset.values shape: (times, lats, lons) - %s \n" % (knmi_DJF_mean.shape,))
print("Seasonally averaged wrf_Dataset.values shape: (times, lats, lons) - %s \n" % (wrf_DJF_mean.shape,))

# June-July-August means for both models.
knmi_jja = dsp.temporal_subset(month_start=6, month_end=8,
                               target_dataset=knmi_dataset)
knmi_JJA_mean = utils.calc_temporal_mean(knmi_jja)
wrf_jja = dsp.temporal_subset(month_start=6, month_end=8,
                              target_dataset=wrf_dataset)
wrf_JJA_mean = utils.calc_temporal_mean(wrf_jja)

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/old_examples/simple_model_tstd.py
----------------------------------------------------------------------
diff --git a/examples/old_examples/simple_model_tstd.py b/examples/old_examples/simple_model_tstd.py
new file mode 100644
index 0000000..4c87813
--- /dev/null
+++ b/examples/old_examples/simple_model_tstd.py
@@ -0,0 +1,89 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""
Temporal standard deviation example: run the unary TemporalStdDev metric
on one model dataset and plot the result as contour maps.
"""

from os import path
import urllib

import ocw.data_source.local as local
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter

# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# One Local Model File
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"

# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "knmi_temporal_std"

# Download the NetCDF file if it is not already present locally.
if not path.exists(FILE_1):
    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)

""" Step 1: Load Local NetCDF File into OCW Dataset Objects """
# print() calls (not Python-2-only print statements) keep this example
# consistent with the other examples in this directory.
print("Loading %s into an OCW Dataset Object" % (FILE_1,))
# 'tasmax' is the variable name of the values to load
knmi_dataset = local.load_file(FILE_1, "tasmax")

print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" % (knmi_dataset.values.shape,))

# Accessing latitudes and longitudes of the netCDF file
lats = knmi_dataset.lats
lons = knmi_dataset.lons

""" Step 2:  Build a Metric to use for Evaluation - Temporal STD for this example """
# You can build your own metrics, but OCW also ships with some common metrics
print("Setting up a Temporal STD metric to use for evaluation")
std = metrics.TemporalStdDev()

""" Step 3: Create an Evaluation Object using Datasets and our Metric """
# The Evaluation Class Signature is:
# Evaluation(reference, targets, metrics, subregions=None)
# Evaluation can take in multiple targets and metrics, so we need to convert
# our examples into Python lists.  Evaluation will iterate over the lists
print("Making the Evaluation definition")
# Temporal STD Metric gets one target dataset, so the reference is None
std_evaluation = evaluation.Evaluation(None, [knmi_dataset], [std])
print("Executing the Evaluation using the object's run() method")
std_evaluation.run()

""" Step 4: Make a Plot from the Evaluation.results """
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# Unary metric results live in Evaluation.unary_results; with one metric
# and one dataset the array of interest is unary_results[0][0].
print("Accessing the Results of the Evaluation run")
results = std_evaluation.unary_results[0][0]
print("The results are of type: %s" % type(results))

# From the temporal std output I want to make a Contour Map of the region
print("Generating a contour map using ocw.plotter.draw_contour_map()")

fname = OUTPUT_PLOT
# 20 subplots (1989-2008) laid out on a 4 x 5 grid.  (The old comment
# claimed "20 rows in 1 column", which contradicted the value.)
gridshape = (4, 5)
plot_title = "TASMAX Temporal Standard Deviation (1989 - 2008)"
sub_titles = range(1989, 2009, 1)

plotter.draw_contour_map(results, lats, lons, fname,
                         gridshape=gridshape, ptitle=plot_title,
                         subtitles=sub_titles)

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/old_examples/subregions.py
----------------------------------------------------------------------
diff --git a/examples/old_examples/subregions.py b/examples/old_examples/subregions.py
new file mode 100644
index 0000000..20aaee9
--- /dev/null
+++ b/examples/old_examples/subregions.py
@@ -0,0 +1,53 @@
# Apache OCW lib imports
from ocw.dataset import Dataset, Bounds
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils

import datetime
import numpy as np
import numpy.ma as ma

"""
Subregion plotting example: define 13 African subregions and draw them
on a map with ocw.plotter.draw_subregions().
"""

OUTPUT_PLOT = "subregions"

# Spatial and temporal configurations
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
# Plain integers: a zero-padded literal such as `01` is a SyntaxError
# under Python 3.
START_SUB = datetime.datetime(2000, 1, 1)
END_SUB = datetime.datetime(2007, 12, 31)

# regridding parameters
gridLonStep = 0.5
gridLatStep = 0.5

# Build the lat/lon grid the subregions are drawn over.
print("... regrid")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)

# 13 subregions: Bounds(lat_min, lat_max, lon_min, lon_max, start, end)
list_of_regions = [
    Bounds(-10.0, 0.0, 29.0, 36.5, START_SUB, END_SUB),
    Bounds(0.0, 10.0, 29.0, 37.5, START_SUB, END_SUB),
    Bounds(10.0, 20.0, 25.0, 32.5, START_SUB, END_SUB),
    Bounds(20.0, 33.0, 25.0, 32.5, START_SUB, END_SUB),
    Bounds(-19.3, -10.2, 12.0, 20.0, START_SUB, END_SUB),
    Bounds(15.0, 30.0, 15.0, 25.0, START_SUB, END_SUB),
    Bounds(-10.0, 10.0, 7.3, 15.0, START_SUB, END_SUB),
    Bounds(-10.9, 10.0, 5.0, 7.3, START_SUB, END_SUB),
    Bounds(33.9, 40.0, 6.9, 15.0, START_SUB, END_SUB),
    Bounds(10.0, 25.0, 0.0, 10.0, START_SUB, END_SUB),
    Bounds(10.0, 25.0, -10.0, 0.0, START_SUB, END_SUB),
    Bounds(30.0, 40.0, -15.0, 0.0, START_SUB, END_SUB),
    Bounds(33.0, 40.0, 25.0, 35.0, START_SUB, END_SUB)]

# plot the subregions
plotter.draw_subregions(list_of_regions, new_lats, new_lons, OUTPUT_PLOT, fmt='png')

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/old_examples/subregions_portrait_diagram.py
----------------------------------------------------------------------
diff --git a/examples/old_examples/subregions_portrait_diagram.py b/examples/old_examples/subregions_portrait_diagram.py
new file mode 100644
index 0000000..075de2d
--- /dev/null
+++ b/examples/old_examples/subregions_portrait_diagram.py
@@ -0,0 +1,139 @@
# Apache OCW lib imports
from ocw.dataset import Dataset, Bounds
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils

import datetime
import numpy as np
import numpy.ma as ma

from os import path
import urllib

"""
Subregion portrait-diagram example: evaluate three model precipitation
runs (plus their ensemble) against CRU3.1 over 13 African subregions and
plot the pattern correlations as a portrait diagram.
"""

# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Three Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
FILE_2 = "AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "portrait_diagram"

# Spatial and temporal configurations
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
# Plain integers: a zero-padded literal such as `01` is a SyntaxError
# under Python 3.
START = datetime.datetime(2000, 1, 1)
END = datetime.datetime(2007, 12, 31)
EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)

# variable that we are analyzing
varName = 'pr'

# regridding parameters
gridLonStep = 0.5
gridLatStep = 0.5

# some vars for this evaluation
target_datasets_ensemble = []
target_datasets = []
allNames = []

# Download each NetCDF file if it is not already present locally.
for data_file in (FILE_1, FILE_2, FILE_3):
    if not path.exists(data_file):
        urllib.urlretrieve(FILE_LEADER + data_file, data_file)

""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
target_datasets.append(local.load_file(FILE_2, varName, name="REGCM"))
target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))

""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
print("Working with the rcmed interface to get CRU3.1 Daily Precipitation")
# the dataset_id and the parameter id were determined from
# https://rcmes.jpl.nasa.gov/content/data-rcmes-database
CRU31 = rcmed.parameter_dataset(10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)

""" Step 3: Processing Datasets so they are the same shape """
print("Processing datasets ...")
CRU31 = dsp.normalize_dataset_datetimes(CRU31, 'monthly')
print("... on units")
CRU31 = dsp.water_flux_unit_conversion(CRU31)

# Subset to the evaluation bounds, convert units and normalize times.
target_datasets = [dsp.subset(EVAL_BOUNDS, ds) for ds in target_datasets]
target_datasets = [dsp.water_flux_unit_conversion(ds) for ds in target_datasets]
target_datasets = [dsp.normalize_dataset_datetimes(ds, 'monthly')
                   for ds in target_datasets]

print("... spatial regridding")
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
target_datasets = [dsp.spatial_regrid(ds, new_lats, new_lons)
                   for ds in target_datasets]

# Collapse each dataset to its total annual mean (second return value of
# utils.calc_climatology_year).
_, CRU31.values = utils.calc_climatology_year(CRU31)
for ds in target_datasets:
    _, ds.values = utils.calc_climatology_year(ds)

# make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name = "ENS"

# append to the target_datasets for final analysis
target_datasets.append(target_datasets_ensemble)

allNames = [ds.name for ds in target_datasets]

# 13 subregions: Bounds(lat_min, lat_max, lon_min, lon_max)
list_of_regions = [
    Bounds(-10.0, 0.0, 29.0, 36.5),
    Bounds(0.0, 10.0, 29.0, 37.5),
    Bounds(10.0, 20.0, 25.0, 32.5),
    Bounds(20.0, 33.0, 25.0, 32.5),
    Bounds(-19.3, -10.2, 12.0, 20.0),
    Bounds(15.0, 30.0, 15.0, 25.0),
    Bounds(-10.0, 10.0, 7.3, 15.0),
    Bounds(-10.9, 10.0, 5.0, 7.3),
    Bounds(33.9, 40.0, 6.9, 15.0),
    Bounds(10.0, 25.0, 0.0, 10.0),
    Bounds(10.0, 25.0, -10.0, 0.0),
    Bounds(30.0, 40.0, -15.0, 0.0),
    Bounds(33.0, 40.0, 25.0, 35.0)]

# range() works on both Python 2 and 3; xrange is Python-2-only.
region_list = ["R" + str(i + 1) for i in range(13)]

# metrics
pattern_correlation = metrics.PatternCorrelation()

# create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(
    CRU31,                  # Reference dataset for the evaluation
    target_datasets,        # 1 or more target datasets for the evaluation
    [pattern_correlation],  # 1 or more metrics to use in the evaluation
    list_of_regions)        # list of subregion Bounds Objects
RCMs_to_CRU_evaluation.run()

# Drop the singleton metric axis so the array is (targets, subregions).
new_patcor = np.squeeze(np.array(RCMs_to_CRU_evaluation.results), axis=1)

plotter.draw_portrait_diagram(new_patcor, allNames, region_list,
                              fname=OUTPUT_PLOT, fmt='png',
                              cmap='coolwarm_r')

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/old_examples/taylor_diagram_example.py
----------------------------------------------------------------------
diff --git a/examples/old_examples/taylor_diagram_example.py b/examples/old_examples/taylor_diagram_example.py
new file mode 100644
index 0000000..b08502e
--- /dev/null
+++ b/examples/old_examples/taylor_diagram_example.py
@@ -0,0 +1,113 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Example: compare KNMI-RACMO2.2b (reference) against UC-WRF311 'tasmax' over
+# Africa for 1989 and summarize the agreement in a Taylor diagram.
+import datetime
+import sys
+from os import path
+import urllib
+
+import numpy
+
+from ocw.dataset import Bounds
+import ocw.data_source.local as local
+import ocw.dataset_processor as dsp
+import ocw.evaluation as evaluation
+import ocw.metrics as metrics
+import ocw.plotter as plotter
+
+FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
+FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
+FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc"
+
+# Download some example NetCDF files for the evaluation
+# NOTE(review): urllib.urlretrieve is Python 2 only; Python 3 moved it to
+# urllib.request.urlretrieve. (sys above is imported but never used.)
+################################################################################
+if not path.exists(FILE_1):
+    urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
+
+if not path.exists(FILE_2):
+    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
+
+# Load the example datasets into OCW Dataset objects. We want to load
+# the 'tasmax' variable values. We'll also name the datasets for use
+# when plotting.
+################################################################################
+knmi_dataset = local.load_file(FILE_1, "tasmax")
+wrf_dataset = local.load_file(FILE_2, "tasmax")
+
+knmi_dataset.name = "knmi"
+wrf_dataset.name = "wrf"
+
+# Date values from loaded datasets might not always fall on reasonable days.
+# With monthly data, we could have data falling on the 1st, 15th, or some other
+# day of the month. Let's fix that real quick.
+################################################################################
+knmi_dataset = dsp.normalize_dataset_datetimes(knmi_dataset, 'monthly')
+wrf_dataset = dsp.normalize_dataset_datetimes(wrf_dataset, 'monthly')
+
+# We're only going to run this evaluation over a years worth of data. We'll
+# make a Bounds object and use it to subset our datasets.
+# Bounds arguments are (lat_min, lat_max, lon_min, lon_max, start, end).
+################################################################################
+subset = Bounds(-45, 42, -24, 60, datetime.datetime(1989, 1, 1), datetime.datetime(1989, 12, 1))
+knmi_dataset = dsp.subset(subset, knmi_dataset)
+wrf_dataset = dsp.subset(subset, wrf_dataset)
+
+# Temporally re-bin the data into a monthly timestep.
+# A 30-day timedelta is used as an approximation of one month.
+################################################################################
+knmi_dataset = dsp.temporal_rebin(knmi_dataset, datetime.timedelta(days=30))
+wrf_dataset = dsp.temporal_rebin(wrf_dataset, datetime.timedelta(days=30))
+
+# Spatially regrid the datasets onto a 1 degree grid.
+################################################################################
+# Get the bounds of the reference dataset and use it to create a new
+# set of lat/lon values on a 1 degree step
+# Using the bounds we will create a new set of lats and lons on 1 degree step
+min_lat, max_lat, min_lon, max_lon = knmi_dataset.spatial_boundaries()
+new_lons = numpy.arange(min_lon, max_lon, 1)
+new_lats = numpy.arange(min_lat, max_lat, 1)
+
+# Spatially regrid datasets using the new_lats, new_lons numpy arrays
+knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
+wrf_dataset = dsp.spatial_regrid(wrf_dataset, new_lats, new_lons)
+
+# Load the metrics that we want to use for the evaluation.
+################################################################################
+sstdr = metrics.StdDevRatio()
+pc = metrics.PatternCorrelation()
+
+# Create our new evaluation object. The knmi dataset is the evaluations
+# reference dataset. We then provide a list of 1 or more target datasets
+# to use for the evaluation. In this case, we only want to use the wrf dataset.
+# Then we pass a list of all the metrics that we want to use in the evaluation.
+################################################################################
+test_evaluation = evaluation.Evaluation(knmi_dataset, [wrf_dataset], [sstdr, pc])
+test_evaluation.run()
+
+# Pull out the evaluation results and prepare them for drawing a Taylor diagram.
+# NOTE(review): presumably results is indexed [target][metric] given the metric
+# order [sstdr, pc] above — verify against ocw.evaluation.
+################################################################################
+spatial_stddev_ratio = test_evaluation.results[0][0]
+spatial_correlation = test_evaluation.results[0][1]
+
+# Taylor-diagram input: one row per target, columns (stddev ratio, correlation).
+taylor_data = numpy.array([[spatial_stddev_ratio], [spatial_correlation]]).transpose()
+
+# Draw our taylor diagram!
+################################################################################
+plotter.draw_taylor_diagram(taylor_data,
+                            [wrf_dataset.name],
+                            knmi_dataset.name,
+                            fname='taylor_plot',
+                            fmt='png',
+                            frameon=False)

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/old_examples/time_series_with_regions.py
----------------------------------------------------------------------
diff --git a/examples/old_examples/time_series_with_regions.py b/examples/old_examples/time_series_with_regions.py
new file mode 100644
index 0000000..1d552a8
--- /dev/null
+++ b/examples/old_examples/time_series_with_regions.py
@@ -0,0 +1,141 @@
+# Apache OCW lib imports
+# Example: climatological monthly-mean precipitation time series for 13 African
+# subregions, comparing CRU3.1 observations with three RCMs and their ensemble.
+from ocw.dataset import Dataset, Bounds
+import ocw.data_source.local as local
+import ocw.data_source.rcmed as rcmed
+import ocw.dataset_processor as dsp
+import ocw.evaluation as evaluation
+import ocw.metrics as metrics
+import ocw.plotter as plotter
+import ocw.utils as utils
+
+import datetime
+import numpy as np
+import numpy.ma as ma
+from os import path
+import urllib
+
+# File URL leader
+FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
+# Three Local Model Files 
+FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
+FILE_2 = "AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc"
+FILE_3 = "AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc"
+
+# Analysis domain (Africa) and period.
+LAT_MIN = -45.0 
+LAT_MAX = 42.24 
+LON_MIN = -24.0
+LON_MAX = 60.0 
+# NOTE(review): the leading-zero literal "01" below is Python 2 only; it is a
+# SyntaxError under Python 3 (write datetime.datetime(2000, 1, 1)).
+START = datetime.datetime(2000, 01, 1)
+END = datetime.datetime(2007, 12, 31)
+
+EVAL_BOUNDS = Bounds(LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
+
+# Variable to load and target regrid resolution (degrees).
+varName = 'pr' 
+gridLonStep=0.44
+gridLatStep=0.44
+
+#needed vars for the script
+target_datasets =[]
+tSeries =[]
+results =[]
+labels =[] # could just as easily be the names for each subregion
+region_counter = 0
+
+# Download necessary NetCDF file if not present
+# NOTE(review): urllib.urlretrieve is Python 2 only (urllib.request.urlretrieve
+# in Python 3), and the first guard below is tab-indented while the other two
+# use spaces — this file mixes tabs and spaces throughout.
+if not path.exists(FILE_1):
+	urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1)
+
+if not path.exists(FILE_2):
+    urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2)
+
+if not path.exists(FILE_3):
+    urllib.urlretrieve(FILE_LEADER + FILE_3, FILE_3)
+
+""" Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list"""
+target_datasets.append(local.load_file(FILE_1, varName, name="KNMI"))
+target_datasets.append(local.load_file(FILE_2, varName, name="REGCM"))
+target_datasets.append(local.load_file(FILE_3, varName, name="UCT"))
+
+
+""" Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module """
+print("Working with the rcmed interface to get CRU3.1 Daily Precipitation")
+# the dataset_id and the parameter id were determined from  
+# https://rcmes.jpl.nasa.gov/content/data-rcmes-database 
+CRU31 = rcmed.parameter_dataset(10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
+
+
+""" Step 3: Processing datasets so they are the same shape ... """
+print("Processing datasets so they are the same shape")
+# Convert water-flux units and snap timestamps to a common monthly stamp.
+CRU31 = dsp.water_flux_unit_conversion(CRU31)
+CRU31 = dsp.normalize_dataset_datetimes(CRU31, 'monthly')
+
+# Apply the same subsetting/unit/time normalization to every model dataset.
+for member, each_target_dataset in enumerate(target_datasets):
+	target_datasets[member] = dsp.subset(EVAL_BOUNDS, target_datasets[member])
+	target_datasets[member] = dsp.water_flux_unit_conversion(target_datasets[member])
+	target_datasets[member] = dsp.normalize_dataset_datetimes(target_datasets[member], 'monthly')  		
+	
+print("... spatial regridding")
+# Common 0.44-degree grid covering the evaluation domain.
+new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
+new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
+CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
+
+
+for member, each_target_dataset in enumerate(target_datasets):
+	target_datasets[member] = dsp.spatial_regrid(target_datasets[member], new_lats, new_lons)
+
+#find climatology monthly for obs and models
+CRU31.values, CRU31.times = utils.calc_climatology_monthly(CRU31)
+
+for member, each_target_dataset in enumerate(target_datasets):
+	target_datasets[member].values, target_datasets[member].times = utils.calc_climatology_monthly(target_datasets[member])
+		
+#make the model ensemble
+target_datasets_ensemble = dsp.ensemble(target_datasets)
+target_datasets_ensemble.name="ENS"
+
+#append to the target_datasets for final analysis
+target_datasets.append(target_datasets_ensemble)
+
+""" Step 4: Subregion stuff """
+# 13 African subregions; Bounds arguments are (lat_min, lat_max, lon_min, lon_max).
+list_of_regions = [
+ Bounds(-10.0, 0.0, 29.0, 36.5), 
+ Bounds(0.0, 10.0,  29.0, 37.5), 
+ Bounds(10.0, 20.0, 25.0, 32.5),
+ Bounds(20.0, 33.0, 25.0, 32.5), 
+ Bounds(-19.3,-10.2,12.0, 20.0), 
+ Bounds( 15.0, 30.0, 15.0, 25.0),
+ Bounds(-10.0, 10.0, 7.3, 15.0), 
+ Bounds(-10.9, 10.0, 5.0, 7.3),  
+ Bounds(33.9, 40.0,  6.9, 15.0),
+ Bounds(10.0, 25.0,  0.0, 10.0), 
+ Bounds(10.0, 25.0,-10.0,  0.0), 
+ Bounds(30.0, 40.0,-15.0,  0.0), 
+ Bounds(33.0, 40.0, 25.0, 35.0)]
+
+# One single-element label list per subregion: [["R1"], ..., ["R13"]].
+# NOTE(review): xrange is Python 2 only; use range() under Python 3.
+region_list=[["R"+str(i+1)] for i in xrange(13)]
+
+# For each subregion: build the observation series, then one series per model
+# (including the appended ensemble), and draw them in one time-series plot.
+# region_counter indexes list_of_regions in step with region_list.
+for regions in region_list:
+	firstTime = True  # NOTE(review): assigned here and below but never read
+	subset_name = regions[0]+"_CRU31"
+	#labels.append(subset_name) #for legend, uncomment this line
+	subset = dsp.subset(list_of_regions[region_counter], CRU31, subset_name)
+	tSeries = utils.calc_time_series(subset)
+	results.append(tSeries)
+	tSeries=[]
+	firstTime = False
+	for member, each_target_dataset in enumerate(target_datasets):
+		subset_name = regions[0]+"_"+target_datasets[member].name
+		#labels.append(subset_name) #for legend, uncomment this line
+		subset = dsp.subset(list_of_regions[region_counter],target_datasets[member],subset_name)
+		tSeries = utils.calc_time_series(subset)
+		results.append(tSeries)
+		tSeries=[]
+	
+	# labels stays empty unless the subset_name lines above are uncommented,
+	# so no legend entries are passed to the plotter.
+	plotter.draw_time_series(np.array(results), CRU31.times, labels, regions[0], ptitle=regions[0],fmt='png')
+	results =[]
+	tSeries =[]
+	labels =[]
+	region_counter+=1
+			
+                               
+

http://git-wip-us.apache.org/repos/asf/climate/blob/7396ffc8/examples/run_RCMES.py
----------------------------------------------------------------------
diff --git a/examples/run_RCMES.py b/examples/run_RCMES.py
index 33c7cb9..9039486 100644
--- a/examples/run_RCMES.py
+++ b/examples/run_RCMES.py
@@ -86,9 +86,12 @@ if ref_data_info['data_source'] == 'rcmed':
     min_lon = np.max([min_lon, ref_dataset.lons.min()])
     max_lon = np.min([max_lon, ref_dataset.lons.max()])
 bounds = Bounds(min_lat, max_lat, min_lon, max_lon, start_time, end_time)
-ref_dataset = dsp.subset(bounds,ref_dataset)
+
+if ref_dataset.lats.ndim !=2 and ref_dataset.lons.ndim !=2:
+    ref_dataset = dsp.subset(bounds,ref_dataset)
 for idata,dataset in enumerate(model_datasets):
-    model_datasets[idata] = dsp.subset(bounds,dataset)
+    if dataset.lats.ndim !=2 and dataset.lons.ndim !=2:
+        model_datasets[idata] = dsp.subset(bounds,dataset)
 
 # Temporaly subset both observation and model datasets for the user specified season
 month_start = time_info['month_start']