Posted to commits@climate.apache.org by bu...@apache.org on 2016/07/27 17:45:59 UTC

svn commit: r993910 [9/11] - in /websites/staging/climate/trunk/content: ./ api/ api/1.0.0/ api/1.1.0/ api/1.1.0/_sources/ api/1.1.0/_sources/config/ api/1.1.0/_sources/data_source/ api/1.1.0/_sources/ocw/ api/1.1.0/_sources/ui-backend/ api/1.1.0/_stat...

Added: websites/staging/climate/trunk/content/api/1.1.0/ocw/metrics.html
==============================================================================
--- websites/staging/climate/trunk/content/api/1.1.0/ocw/metrics.html (added)
+++ websites/staging/climate/trunk/content/api/1.1.0/ocw/metrics.html Wed Jul 27 17:45:58 2016
@@ -0,0 +1,578 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+  <head>
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+    
+    <title>Metrics Module &mdash; Apache Open Climate Workbench 1.1.0 documentation</title>
+    
+    <link rel="stylesheet" href="../_static/alabaster.css" type="text/css" />
+    <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+    
+    <script type="text/javascript">
+      var DOCUMENTATION_OPTIONS = {
+        URL_ROOT:    '../',
+        VERSION:     '1.1.0',
+        COLLAPSE_INDEX: false,
+        FILE_SUFFIX: '.html',
+        HAS_SOURCE:  true
+      };
+    </script>
+    <script type="text/javascript" src="../_static/jquery.js"></script>
+    <script type="text/javascript" src="../_static/underscore.js"></script>
+    <script type="text/javascript" src="../_static/doctools.js"></script>
+    <link rel="top" title="Apache Open Climate Workbench 1.1.0 documentation" href="../index.html" />
+    <link rel="next" title="Plotter Module" href="plotter.html" />
+    <link rel="prev" title="Evaluation Module" href="evaluation.html" />
+   
+  
+  <meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9">
+
+  </head>
+  <body role="document">
+    <div class="related" role="navigation" aria-label="related navigation">
+      <h3>Navigation</h3>
+      <ul>
+        <li class="right" style="margin-right: 10px">
+          <a href="../genindex.html" title="General Index"
+             accesskey="I">index</a></li>
+        <li class="right" >
+          <a href="../http-routingtable.html" title="HTTP Routing Table"
+             >routing table</a> |</li>
+        <li class="right" >
+          <a href="../py-modindex.html" title="Python Module Index"
+             >modules</a> |</li>
+        <li class="right" >
+          <a href="plotter.html" title="Plotter Module"
+             accesskey="N">next</a> |</li>
+        <li class="right" >
+          <a href="evaluation.html" title="Evaluation Module"
+             accesskey="P">previous</a> |</li>
+        <li class="nav-item nav-item-0"><a href="../index.html">Apache Open Climate Workbench 1.1.0 documentation</a> &raquo;</li> 
+      </ul>
+    </div>  
+
+    <div class="document">
+      <div class="documentwrapper">
+        <div class="bodywrapper">
+          <div class="body" role="main">
+            
+  <div class="section" id="module-metrics">
+<span id="metrics-module"></span><h1>Metrics Module<a class="headerlink" href="#module-metrics" title="Permalink to this headline">¶</a></h1>
+<dl class="docutils">
+<dt>Classes:</dt>
+<dd>Metric - Abstract Base Class from which all metrics must inherit.</dd>
+</dl>
+<dl class="class">
+<dt id="metrics.Bias">
+<em class="property">class </em><code class="descclassname">metrics.</code><code class="descname">Bias</code><a class="headerlink" href="#metrics.Bias" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the bias between a reference and target dataset.</p>
+<dl class="method">
+<dt id="metrics.Bias.run">
+<code class="descname">run</code><span class="sig-paren">(</span><em>ref_dataset</em>, <em>target_dataset</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.Bias.run" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the bias between a reference and target dataset.</p>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p class="last">Overrides BinaryMetric.run()</p>
+</div>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
+<li><strong>ref_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The reference dataset to use in this metric run.</li>
+<li><strong>target_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The target dataset to evaluate against the
+reference dataset in this metric run.</li>
+</ul>
+</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">The difference between the reference and target datasets.</p>
+</td>
+</tr>
+<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last"><a class="reference external" href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray" title="(in NumPy v1.11)"><code class="xref py py-class docutils literal"><span class="pre">numpy.ndarray</span></code></a></p>
+</td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
+</dd></dl>
+
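+<p>A minimal usage sketch (the names <cite>ref_ds</cite> and <cite>target_ds</cite> are illustrative placeholders for two compatible <code class="docutils literal"><span class="pre">dataset.Dataset</span></code> objects):</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; import ocw.metrics as metrics
+&gt;&gt;&gt; # Both datasets are assumed to share the same lat/lon/time grid.
+&gt;&gt;&gt; bias = metrics.Bias()
+&gt;&gt;&gt; diff = bias.run(ref_ds, target_ds)  # numpy.ndarray of differences
+</pre></div>
+</div>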
+<dl class="class">
+<dt id="metrics.BinaryMetric">
+<em class="property">class </em><code class="descclassname">metrics.</code><code class="descname">BinaryMetric</code><a class="headerlink" href="#metrics.BinaryMetric" title="Permalink to this definition">¶</a></dt>
+<dd><p>Abstract Base Class from which all binary metrics inherit.</p>
+<dl class="method">
+<dt id="metrics.BinaryMetric.run">
+<code class="descname">run</code><span class="sig-paren">(</span><em>ref_dataset</em>, <em>target_dataset</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.BinaryMetric.run" title="Permalink to this definition">¶</a></dt>
+<dd><p>Run the metric for the given reference and target datasets.</p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
+<li><strong>ref_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The Dataset to use as the reference dataset when
+running the evaluation.</li>
+<li><strong>target_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The Dataset to use as the target dataset when
+running the evaluation.</li>
+</ul>
+</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first last">The result of evaluation the metric on the reference and 
+target dataset.</p>
+</td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
+</dd></dl>
+
+<dl class="class">
+<dt id="metrics.Metric">
+<em class="property">class </em><code class="descclassname">metrics.</code><code class="descname">Metric</code><a class="headerlink" href="#metrics.Metric" title="Permalink to this definition">¶</a></dt>
+<dd><p>Base Metric Class</p>
+</dd></dl>
+
+<dl class="class">
+<dt id="metrics.PatternCorrelation">
+<em class="property">class </em><code class="descclassname">metrics.</code><code class="descname">PatternCorrelation</code><a class="headerlink" href="#metrics.PatternCorrelation" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the correlation coefficient between two datasets.</p>
+<dl class="method">
+<dt id="metrics.PatternCorrelation.run">
+<code class="descname">run</code><span class="sig-paren">(</span><em>ref_dataset</em>, <em>target_dataset</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.PatternCorrelation.run" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the correlation coefficient between two datasets.</p>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p class="last">Overrides BinaryMetric.run()</p>
+</div>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
+<li><strong>ref_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The reference dataset to use in this metric run.</li>
+<li><strong>target_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The target dataset to evaluate against the
+reference dataset in this metric run.</li>
+</ul>
+</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first last">The correlation coefficient between a reference and target dataset.</p>
+</td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
+</dd></dl>
+
+<dl class="class">
+<dt id="metrics.RMSError">
+<em class="property">class </em><code class="descclassname">metrics.</code><code class="descname">RMSError</code><a class="headerlink" href="#metrics.RMSError" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the Root Mean Square Difference (RMS Error), with the mean
+calculated over time and space.</p>
+<dl class="method">
+<dt id="metrics.RMSError.run">
+<code class="descname">run</code><span class="sig-paren">(</span><em>reference_dataset</em>, <em>target_dataset</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.RMSError.run" title="Permalink to this definition">¶</a></dt>
+<dd><dl class="docutils">
+<dt>Calculate the Root Mean Square Difference (RMS Error), with the mean</dt>
+<dd>calculated over time and space.</dd>
+</dl>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p class="last">Overrides BinaryMetric.run()</p>
+</div>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
+<li><strong>reference_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The reference dataset to use in this metric
+run</li>
+<li><strong>target_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The target dataset to evaluate against the
+reference dataset in this metric run</li>
+</ul>
+</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first last">The RMS error, with the mean calculated over time and space</p>
+</td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
+</dd></dl>
+
+<dl class="class">
+<dt id="metrics.SpatialPatternTaylorDiagram">
+<em class="property">class </em><code class="descclassname">metrics.</code><code class="descname">SpatialPatternTaylorDiagram</code><a class="headerlink" href="#metrics.SpatialPatternTaylorDiagram" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the target-to-reference ratio of spatial standard deviations and the pattern correlation between two datasets.</p>
+<dl class="method">
+<dt id="metrics.SpatialPatternTaylorDiagram.run">
+<code class="descname">run</code><span class="sig-paren">(</span><em>ref_dataset</em>, <em>target_dataset</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.SpatialPatternTaylorDiagram.run" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the two statistics needed to plot a Taylor diagram comparing spatial patterns.</p>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p class="last">Overrides BinaryMetric.run()</p>
+</div>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
+<li><strong>ref_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The reference dataset to use in this metric run.</li>
+<li><strong>target_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The target dataset to evaluate against the
+reference dataset in this metric run.</li>
+</ul>
+</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">standard deviation ratio, pattern correlation coefficient</p>
+</td>
+</tr>
+<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">:float:&#8217;float&#8217;,&#8217;float&#8217;</p>
+</td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
+</dd></dl>
+
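+<p>A short sketch of unpacking the two return values (dataset names are placeholders):</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; import ocw.metrics as metrics
+&gt;&gt;&gt; taylor = metrics.SpatialPatternTaylorDiagram()
+&gt;&gt;&gt; # Returns the spatial standard deviation ratio and the pattern
+&gt;&gt;&gt; # correlation coefficient, in that order.
+&gt;&gt;&gt; stddev_ratio, pattern_corr = taylor.run(ref_ds, target_ds)
+</pre></div>
+</div>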
+<dl class="class">
+<dt id="metrics.StdDevRatio">
+<em class="property">class </em><code class="descclassname">metrics.</code><code class="descname">StdDevRatio</code><a class="headerlink" href="#metrics.StdDevRatio" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the standard deviation ratio between two datasets.</p>
+<dl class="method">
+<dt id="metrics.StdDevRatio.run">
+<code class="descname">run</code><span class="sig-paren">(</span><em>ref_dataset</em>, <em>target_dataset</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.StdDevRatio.run" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the standard deviation ratio.</p>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p class="last">Overrides BinaryMetric.run()</p>
+</div>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
+<li><strong>ref_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The reference dataset to use in this metric run.</li>
+<li><strong>target_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The target dataset to evaluate against the
+reference dataset in this metric run.</li>
+</ul>
+</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first last">The standard deviation ratio of the reference and target</p>
+</td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
+</dd></dl>
+
+<dl class="class">
+<dt id="metrics.TemporalCorrelation">
+<em class="property">class </em><code class="descclassname">metrics.</code><code class="descname">TemporalCorrelation</code><a class="headerlink" href="#metrics.TemporalCorrelation" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the temporal correlation coefficients and associated
+confidence levels between two datasets, using Pearson&#8217;s correlation.</p>
+<dl class="method">
+<dt id="metrics.TemporalCorrelation.run">
+<code class="descname">run</code><span class="sig-paren">(</span><em>reference_dataset</em>, <em>target_dataset</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.TemporalCorrelation.run" title="Permalink to this definition">¶</a></dt>
+<dd><dl class="docutils">
+<dt>Calculate the temporal correlation coefficients and associated</dt>
+<dd>confidence levels between two datasets, using Pearson&#8217;s correlation.</dd>
+</dl>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p class="last">Overrides BinaryMetric.run()</p>
+</div>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
+<li><strong>reference_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The reference dataset to use in this metric
+run</li>
+<li><strong>target_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The target dataset to evaluate against the
+reference dataset in this metric run</li>
+</ul>
+</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first last">A 2D array of temporal correlation coefficients and a 2D
+array of confidence levels associated with the temporal correlation
+coefficients</p>
+</td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
+</dd></dl>
+
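+<p>A short sketch of running this metric (dataset names are placeholders):</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; import ocw.metrics as metrics
+&gt;&gt;&gt; tc = metrics.TemporalCorrelation()
+&gt;&gt;&gt; # Both return values are 2D arrays over the spatial grid.
+&gt;&gt;&gt; coefficients, confidence = tc.run(reference_ds, target_ds)
+</pre></div>
+</div>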
+<dl class="class">
+<dt id="metrics.TemporalMeanBias">
+<em class="property">class </em><code class="descclassname">metrics.</code><code class="descname">TemporalMeanBias</code><a class="headerlink" href="#metrics.TemporalMeanBias" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the bias averaged over time.</p>
+<dl class="method">
+<dt id="metrics.TemporalMeanBias.run">
+<code class="descname">run</code><span class="sig-paren">(</span><em>ref_dataset</em>, <em>target_dataset</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.TemporalMeanBias.run" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the bias averaged over time.</p>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p class="last">Overrides BinaryMetric.run()</p>
+</div>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
+<li><strong>ref_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The reference dataset to use in this metric run.</li>
+<li><strong>target_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The target dataset to evaluate against the
+reference dataset in this metric run.</li>
+</ul>
+</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first last">The mean bias between a reference and target dataset over time.</p>
+</td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
+</dd></dl>
+
+<dl class="class">
+<dt id="metrics.TemporalStdDev">
+<em class="property">class </em><code class="descclassname">metrics.</code><code class="descname">TemporalStdDev</code><a class="headerlink" href="#metrics.TemporalStdDev" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the standard deviation over time.</p>
+<dl class="method">
+<dt id="metrics.TemporalStdDev.run">
+<code class="descname">run</code><span class="sig-paren">(</span><em>target_dataset</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.TemporalStdDev.run" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the temporal standard deviation of a dataset.</p>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p class="last">Overrides UnaryMetric.run()</p>
+</div>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>target_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The target_dataset on which to calculate the 
+temporal standard deviation.</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body">The temporal standard deviation of the target dataset</td>
+</tr>
+<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><code class="xref py py-class docutils literal"><span class="pre">ndarray</span></code></td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
+</dd></dl>
+
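+<p>Because this is a unary metric, <cite>run</cite> takes a single dataset (the name <cite>target_ds</cite> is a placeholder):</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; import ocw.metrics as metrics
+&gt;&gt;&gt; tstd = metrics.TemporalStdDev()
+&gt;&gt;&gt; stddev = tstd.run(target_ds)  # standard deviation along the time axis
+</pre></div>
+</div>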
+<dl class="class">
+<dt id="metrics.UnaryMetric">
+<em class="property">class </em><code class="descclassname">metrics.</code><code class="descname">UnaryMetric</code><a class="headerlink" href="#metrics.UnaryMetric" title="Permalink to this definition">¶</a></dt>
+<dd><p>Abstract Base Class from which all unary metrics inherit.</p>
+<dl class="method">
+<dt id="metrics.UnaryMetric.run">
+<code class="descname">run</code><span class="sig-paren">(</span><em>target_dataset</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.UnaryMetric.run" title="Permalink to this definition">¶</a></dt>
+<dd><p>Run the metric for a given target dataset.</p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>target_dataset</strong> (<a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a>) &#8211; The dataset on which the current metric will
+be run.</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body">The result of evaluating the metric on the target_dataset.</td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
+</dd></dl>
+
+<dl class="function">
+<dt id="metrics.calc_bias">
+<code class="descclassname">metrics.</code><code class="descname">calc_bias</code><span class="sig-paren">(</span><em>target_array</em>, <em>reference_array</em>, <em>average_over_time=False</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.calc_bias" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the difference between two arrays.</p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
+<li><strong>target_array</strong> (<em>numpy.ma.core.MaskedArray</em>) &#8211; the array to be evaluated, typically model output</li>
+<li><strong>reference_array</strong> (<em>numpy.ma.core.MaskedArray</em>) &#8211; the reference dataset array</li>
+<li><strong>average_over_time</strong> (<em>bool</em>) &#8211; if True, the calculated bias is averaged along axis 0 (time)</li>
+</ul>
+</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">Biases array of the target dataset</p>
+</td>
+</tr>
+<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">:class:&#8217;numpy.ma.core.MaskedArray&#8217;</p>
+</td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
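+<p>A minimal sketch of calling this helper directly on the underlying value arrays (assuming <cite>target_ds</cite> and <cite>ref_ds</cite> are compatible datasets whose values are shaped (time, lat, lon)):</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; from ocw.metrics import calc_bias
+&gt;&gt;&gt; # Full bias array, one value per time step and grid cell.
+&gt;&gt;&gt; bias = calc_bias(target_ds.values, ref_ds.values)
+&gt;&gt;&gt; # Bias averaged along axis 0 (time).
+&gt;&gt;&gt; mean_bias = calc_bias(target_ds.values, ref_ds.values, average_over_time=True)
+</pre></div>
+</div>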
+<dl class="function">
+<dt id="metrics.calc_correlation">
+<code class="descclassname">metrics.</code><code class="descname">calc_correlation</code><span class="sig-paren">(</span><em>target_array</em>, <em>reference_array</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.calc_correlation" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the correlation coefficient between two arrays.</p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
+<li><strong>target_array</strong> (<em>numpy.ma.core.MaskedArray</em>) &#8211; the array to be evaluated, typically model output</li>
+<li><strong>reference_array</strong> (<em>numpy.ma.core.MaskedArray</em>) &#8211; the reference dataset array</li>
+</ul>
+</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">pearson&#8217;s correlation coefficient between the two input arrays</p>
+</td>
+</tr>
+<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">:class:&#8217;numpy.ma.core.MaskedArray&#8217;</p>
+</td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
+<dl class="function">
+<dt id="metrics.calc_rmse">
+<code class="descclassname">metrics.</code><code class="descname">calc_rmse</code><span class="sig-paren">(</span><em>target_array</em>, <em>reference_array</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.calc_rmse" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the root mean square error between two arrays.</p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
+<li><strong>target_array</strong> (<em>numpy.ma.core.MaskedArray</em>) &#8211; the array to be evaluated, typically model output</li>
+<li><strong>reference_array</strong> (<em>numpy.ma.core.MaskedArray</em>) &#8211; the reference dataset array</li>
+</ul>
+</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">root mean square error</p>
+</td>
+</tr>
+<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">:class:&#8217;float&#8217;</p>
+</td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
+<dl class="function">
+<dt id="metrics.calc_stddev">
+<code class="descclassname">metrics.</code><code class="descname">calc_stddev</code><span class="sig-paren">(</span><em>array</em>, <em>axis=None</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.calc_stddev" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the sample standard deviation of an array along the given axis.</p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
+<li><strong>array</strong> (<em>numpy.ma.core.MaskedArray</em>) &#8211; the array for which the sample standard deviation is calculated</li>
+<li><strong>axis</strong> (<em>int</em>) &#8211; axis along which the sample standard deviation is computed</li>
+</ul>
+</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">sample standard deviation of array</p>
+</td>
+</tr>
+<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">:class:&#8217;numpy.ma.core.MaskedArray&#8217;</p>
+</td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
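+<p>A brief sketch (the dataset name is a placeholder; axis 0 is assumed to be time, as elsewhere in this module):</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; from ocw.metrics import calc_stddev
+&gt;&gt;&gt; # Sample standard deviation along the time axis.
+&gt;&gt;&gt; stddev = calc_stddev(target_ds.values, axis=0)
+</pre></div>
+</div>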
+<dl class="function">
+<dt id="metrics.calc_stddev_ratio">
+<code class="descclassname">metrics.</code><code class="descname">calc_stddev_ratio</code><span class="sig-paren">(</span><em>target_array</em>, <em>reference_array</em><span class="sig-paren">)</span><a class="headerlink" href="#metrics.calc_stddev_ratio" title="Permalink to this definition">¶</a></dt>
+<dd><p>Calculate the ratio of standard deviations of two arrays.</p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
+<li><strong>target_array</strong> (<em>numpy.ma.core.MaskedArray</em>) &#8211; the array to be evaluated, typically model output</li>
+<li><strong>reference_array</strong> (<em>numpy.ma.core.MaskedArray</em>) &#8211; the reference dataset array</li>
+</ul>
+</td>
+</tr>
+<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">(standard deviation of target_array)/(standard deviation of reference array)</p>
+</td>
+</tr>
+<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">:class:&#8217;float&#8217;</p>
+</td>
+</tr>
+</tbody>
+</table>
+</dd></dl>
+
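+<p>A one-line sketch (placeholder dataset names):</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; from ocw.metrics import calc_stddev_ratio
+&gt;&gt;&gt; ratio = calc_stddev_ratio(target_ds.values, ref_ds.values)
+</pre></div>
+</div>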
+</div>
+
+
+          </div>
+        </div>
+      </div>
+      <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
+        <div class="sphinxsidebarwrapper">
+            <p class="logo"><a href="../index.html">
+              <img class="logo" src="../_static/ocw-logo-variant-sm-01-01-new.png" alt="Logo"/>
+            </a></p>
+  <h4>Previous topic</h4>
+  <p class="topless"><a href="evaluation.html"
+                        title="previous chapter">Evaluation Module</a></p>
+  <h4>Next topic</h4>
+  <p class="topless"><a href="plotter.html"
+                        title="next chapter">Plotter Module</a></p>
+  <div role="note" aria-label="source link">
+    <h3>This Page</h3>
+    <ul class="this-page-menu">
+      <li><a href="../_sources/ocw/metrics.txt"
+            rel="nofollow">Show Source</a></li>
+    </ul>
+   </div>
+<div id="searchbox" style="display: none" role="search">
+  <h3>Quick search</h3>
+    <form class="search" action="../search.html" method="get">
+      <input type="text" name="q" />
+      <input type="submit" value="Go" />
+      <input type="hidden" name="check_keywords" value="yes" />
+      <input type="hidden" name="area" value="default" />
+    </form>
+    <p class="searchtip" style="font-size: 90%">
+    Enter search terms or a module, class or function name.
+    </p>
+</div>
+<script type="text/javascript">$('#searchbox').show(0);</script>
+        </div>
+      </div>
+      <div class="clearer"></div>
+    </div>
+    <div class="footer">
+      &copy;2016, Apache Software Foundation.
+      
+      |
+      Powered by <a href="http://sphinx-doc.org/">Sphinx 1.3.1</a>
+      &amp; <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.4</a>
+      
+      |
+      <a href="../_sources/ocw/metrics.txt"
+          rel="nofollow">Page source</a></li>
+    </div>
+
+    
+
+    
+  </body>
+</html>
\ No newline at end of file

Added: websites/staging/climate/trunk/content/api/1.1.0/ocw/overview.html
==============================================================================
--- websites/staging/climate/trunk/content/api/1.1.0/ocw/overview.html (added)
+++ websites/staging/climate/trunk/content/api/1.1.0/ocw/overview.html Wed Jul 27 17:45:58 2016
@@ -0,0 +1,304 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+  <head>
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+    
+    <title>Overview &mdash; Apache Open Climate Workbench 1.1.0 documentation</title>
+    
+    <link rel="stylesheet" href="../_static/alabaster.css" type="text/css" />
+    <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+    
+    <script type="text/javascript">
+      var DOCUMENTATION_OPTIONS = {
+        URL_ROOT:    '../',
+        VERSION:     '1.1.0',
+        COLLAPSE_INDEX: false,
+        FILE_SUFFIX: '.html',
+        HAS_SOURCE:  true
+      };
+    </script>
+    <script type="text/javascript" src="../_static/jquery.js"></script>
+    <script type="text/javascript" src="../_static/underscore.js"></script>
+    <script type="text/javascript" src="../_static/doctools.js"></script>
+    <link rel="top" title="Apache Open Climate Workbench 1.1.0 documentation" href="../index.html" />
+    <link rel="next" title="Dataset Module" href="dataset.html" />
+    <link rel="prev" title="Welcome to Apache Open Climate Workbench’s documentation!" href="../index.html" />
+   
+  
+  <meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9">
+
+  </head>
+  <body role="document">
+    <div class="related" role="navigation" aria-label="related navigation">
+      <h3>Navigation</h3>
+      <ul>
+        <li class="right" style="margin-right: 10px">
+          <a href="../genindex.html" title="General Index"
+             accesskey="I">index</a></li>
+        <li class="right" >
+          <a href="../http-routingtable.html" title="HTTP Routing Table"
+             >routing table</a> |</li>
+        <li class="right" >
+          <a href="../py-modindex.html" title="Python Module Index"
+             >modules</a> |</li>
+        <li class="right" >
+          <a href="dataset.html" title="Dataset Module"
+             accesskey="N">next</a> |</li>
+        <li class="right" >
+          <a href="../index.html" title="Welcome to Apache Open Climate Workbench’s documentation!"
+             accesskey="P">previous</a> |</li>
+        <li class="nav-item nav-item-0"><a href="../index.html">Apache Open Climate Workbench 1.1.0 documentation</a> &raquo;</li> 
+      </ul>
+    </div>  
+
+    <div class="document">
+      <div class="documentwrapper">
+        <div class="bodywrapper">
+          <div class="body" role="main">
+            
+  <div class="section" id="overview">
+<h1>Overview<a class="headerlink" href="#overview" title="Permalink to this headline">¶</a></h1>
+<p>The Apache Open Climate Workbench toolkit aims to provide a suite of tools to make climate scientists&#8217; lives easier. It does this by providing tools for loading and manipulating datasets, running evaluations, and plotting results. Below is a breakdown of many of the OCW components with an explanation of how to use them. An OCW evaluation usually has the following steps:</p>
+<ol class="arabic simple">
+<li>Load one or more datasets</li>
+<li>Perform dataset manipulations (subset, temporal/spatial rebin, etc.)</li>
+<li>Load various metrics</li>
+<li>Instantiate and run the evaluation</li>
+<li>Plot results</li>
+</ol>
+<div class="section" id="common-data-abstraction">
+<h2>Common Data Abstraction<a class="headerlink" href="#common-data-abstraction" title="Permalink to this headline">¶</a></h2>
+<p>The OCW <a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a> class is the primary data abstraction used throughout OCW. It facilitates the uniform handling of data throughout the toolkit and provides a few useful helper functions such as <a class="reference internal" href="dataset.html#dataset.Dataset.spatial_boundaries" title="dataset.Dataset.spatial_boundaries"><code class="xref py py-func docutils literal"><span class="pre">dataset.Dataset.spatial_boundaries()</span></code></a> and <a class="reference internal" href="dataset.html#dataset.Dataset.time_range" title="dataset.Dataset.time_range"><code class="xref py py-func docutils literal"><span class="pre">dataset.Dataset.time_range()</span></code></a>. Creating a new dataset object is straightforward but generally you will want to use an OCW data source to load the data for you.</p>
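+<p>A short sketch of building a <code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code> by hand and calling the helper functions; the exact constructor arguments and helper return orders shown here are assumptions, and in practice a data source usually builds the object for you:</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; import numpy as np
+&gt;&gt;&gt; import datetime as dt
+&gt;&gt;&gt; from ocw.dataset import Dataset
+&gt;&gt;&gt; lats = np.arange(-45, 46, 1)
+&gt;&gt;&gt; lons = np.arange(-90, 91, 1)
+&gt;&gt;&gt; times = np.array([dt.datetime(2000, m, 1) for m in range(1, 13)])
+&gt;&gt;&gt; values = np.zeros((len(times), len(lats), len(lons)))
+&gt;&gt;&gt; ds = Dataset(lats, lons, times, values, variable=&#39;example&#39;)
+&gt;&gt;&gt; ds.spatial_boundaries()  # assumed (min_lat, max_lat, min_lon, max_lon)
+&gt;&gt;&gt; ds.time_range()          # assumed (start_datetime, end_datetime)
+</pre></div>
+</div>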
+</div>
+<div class="section" id="data-sources">
+<h2>Data Sources<a class="headerlink" href="#data-sources" title="Permalink to this headline">¶</a></h2>
+<p>OCW data sources allow users to easily load <a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a> objects from a number of places. These data sources help with step 1 of an evaluation above. In general, the primary supported file format is NetCDF. For instance, the <a class="reference internal" href="../data_source/data_sources.html#module-local" title="local"><code class="xref py py-mod docutils literal"><span class="pre">local</span></code></a>, <a class="reference internal" href="../data_source/data_sources.html#module-dap" title="dap"><code class="xref py py-mod docutils literal"><span class="pre">dap</span></code></a> and <a class="reference internal" href="../data_source/data_sources.html#module-esgf" title="esgf"><code class="xref py py-mod docutils literal"><span class="pre">esgf</span></code></a> data sources only support loading NetCDF files from your local machine, an OpenDAP URL, and the ESGF, respectively. Some data sources, such as <a class="reference internal" href="../data_source/data_sources.html#module-rcmed" title="rcmed"><code class="xref py py-mod docutils literal"><span class="pre">rcmed</span></code></a>, point to externally supported data sources. In the case of the RCMED data source, the Regional Climate Model Evaluation Database is run by NASA&#8217;s Jet Propulsion Laboratory.</p>
+<p>Adding additional data sources is quite simple. The only API limitation that we have on a data source is that it returns a valid <a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a> object. Please feel free to send patches for adding more data sources.</p>
+<p>A simple example using the <a class="reference internal" href="../data_source/data_sources.html#module-local" title="local"><code class="xref py py-mod docutils literal"><span class="pre">local</span></code></a> data source to load a NetCDF file from your local machine:</p>
+<div class="highlight-python"><div class="highlight"><pre><span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">ocw.data_source.local</span> <span class="kn">as</span> <span class="nn">local</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">ds</span> <span class="o">=</span> <span class="n">local</span><span class="o">.</span><span class="n">load_file</span><span class="p">(</span><span class="s">&#39;/tmp/some_dataset.nc&#39;</span><span class="p">,</span> <span class="s">&#39;SomeVarInTheDataset&#39;</span><span class="p">)</span>
+</pre></div>
+</div>
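+<p>Similarly, a hedged sketch of loading from an OpenDAP URL with the <code class="xref py py-mod docutils literal"><span class="pre">dap</span></code> data source; the <cite>load</cite> function name, the URL, and the variable name here are assumptions for illustration:</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; import ocw.data_source.dap as dap
+&gt;&gt;&gt; ds = dap.load(&#39;http://example.com/opendap/some_dataset.nc&#39;, &#39;SomeVariable&#39;)
+</pre></div>
+</div>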
+</div>
+<div class="section" id="dataset-manipulations">
+<h2>Dataset Manipulations<a class="headerlink" href="#dataset-manipulations" title="Permalink to this headline">¶</a></h2>
+<p>All <a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a> manipulations are handled by the <a class="reference internal" href="dataset_processor.html#module-dataset_processor" title="dataset_processor"><code class="xref py py-mod docutils literal"><span class="pre">dataset_processor</span></code></a> module. In general, an evaluation will include calls to <a class="reference internal" href="dataset_processor.html#dataset_processor.subset" title="dataset_processor.subset"><code class="xref py py-func docutils literal"><span class="pre">dataset_processor.subset()</span></code></a>, <a class="reference internal" href="dataset_processor.html#dataset_processor.spatial_regrid" title="dataset_processor.spatial_regrid"><code class="xref py py-func docutils literal"><span class="pre">dataset_processor.spatial_regrid()</span></code></a>, and <a class="refe
 rence internal" href="dataset_processor.html#dataset_processor.temporal_rebin" title="dataset_processor.temporal_rebin"><code class="xref py py-func docutils literal"><span class="pre">dataset_processor.temporal_rebin()</span></code></a> to ensure that the datasets can actually be compared. <a class="reference internal" href="dataset_processor.html#module-dataset_processor" title="dataset_processor"><code class="xref py py-mod docutils literal"><span class="pre">dataset_processor</span></code></a> functions take a <a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a> object and some various parameters and return a modified <a class="reference internal" href="dataset.html#dataset.Dataset" title="dataset.Dataset"><code class="xref py py-class docutils literal"><span class="pre">dataset.Dataset</span></code></a> object. The original dataset is never ma
 nipulated in the process.</p>
+<p>Subsetting is a great way to speed up your processing and keep useless data out of your plots. Notice that we&#8217;re using a <a class="reference internal" href="dataset.html#dataset.Bounds" title="dataset.Bounds"><code class="xref py py-class docutils literal"><span class="pre">dataset.Bounds</span></code></a> object to represent the area of interest:</p>
+<div class="highlight-python"><div class="highlight"><pre><span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">ocw.dataset_processor</span> <span class="kn">as</span> <span class="nn">dsp</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">new_bounds</span> <span class="o">=</span> <span class="n">Bounds</span><span class="p">(</span><span class="n">min_lat</span><span class="p">,</span> <span class="n">max_lat</span><span class="p">,</span> <span class="n">min_lon</span><span class="p">,</span> <span class="n">max_lon</span><span class="p">,</span> <span class="n">start_time</span><span class="p">,</span> <span class="n">end_time</span><span class="p">)</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">knmi_dataset</span> <span class="o">=</span> <span class="n">dsp</span><span class="o">.</span><span class="n">subset</span><span class="p">(</span><span class="n">new_bounds</span><span class="p">,</span> <span class="n">knmi_dataset</span><span class="p">)</span>
+</pre></div>
+</div>
+<p>Temporally re-binning a dataset is useful when the time step of the data is too fine-grained for the desired use. For instance, perhaps we want to see a yearly trend but we have daily data. We would need to make the following call to adjust our dataset:</p>
+<div class="highlight-python"><div class="highlight"><pre><span class="gp">&gt;&gt;&gt; </span><span class="n">knmi_dataset</span> <span class="o">=</span> <span class="n">dsp</span><span class="o">.</span><span class="n">temporal_rebin</span><span class="p">(</span><span class="n">knmi_dataset</span><span class="p">,</span> <span class="n">datetime</span><span class="o">.</span><span class="n">timedelta</span><span class="p">(</span><span class="n">days</span><span class="o">=</span><span class="mi">365</span><span class="p">))</span>
+</pre></div>
+</div>
+<p>Our datasets must be on the same lat/lon grid before we can compare them. That&#8217;s where spatial re-gridding comes in handy. Here we re-grid our example dataset onto a 1-degree lat/lon grid within the range to which we subsetted the dataset previously:</p>
+<div class="highlight-python"><div class="highlight"><pre><span class="gp">&gt;&gt;&gt; </span><span class="n">new_lons</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">arange</span><span class="p">(</span><span class="n">min_lon</span><span class="p">,</span> <span class="n">max_lon</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">new_lats</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">arange</span><span class="p">(</span><span class="n">min_lat</span><span class="p">,</span> <span class="n">max_lat</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">knmi_dataset</span> <span class="o">=</span> <span class="n">dsp</span><span class="o">.</span><span class="n">spatial_regrid</span><span class="p">(</span><span class="n">knmi_dataset</span><span class="p">,</span> <span class="n">new_lats</span><span class="p">,</span> <span class="n">new_lons</span><span class="p">)</span>
+</pre></div>
+</div>
+</div>
+<div class="section" id="metrics">
+<h2>Metrics<a class="headerlink" href="#metrics" title="Permalink to this headline">¶</a></h2>
+<p>Metrics are the backbone of an evaluation. You&#8217;ll find a number of (hopefully) useful &#8220;default&#8221; metrics in the <a class="reference internal" href="metrics.html#module-metrics" title="metrics"><code class="xref py py-mod docutils literal"><span class="pre">metrics</span></code></a> module in the toolkit. In general you are unlikely to use a metric outside of an evaluation; however, you can run a metric manually if you so desire:</p>
+<div class="highlight-python"><div class="highlight"><pre><span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">ocw.metrics</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="c"># Load 2 datasets</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">bias</span> <span class="o">=</span> <span class="n">ocw</span><span class="o">.</span><span class="n">metrics</span><span class="o">.</span><span class="n">Bias</span><span class="p">()</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="k">print</span> <span class="n">bias</span><span class="o">.</span><span class="n">run</span><span class="p">(</span><span class="n">dataset1</span><span class="p">,</span> <span class="n">dataset2</span><span class="p">)</span>
+</pre></div>
+</div>
+<p>While this might be exactly what you need to get the job done, it is far more likely that you&#8217;ll need to run a number of metrics over a number of datasets. That&#8217;s where running an evaluation comes in, but we&#8217;ll get to that shortly.</p>
+<p>There are two &#8220;types&#8221; of metrics that the toolkit supports. A unary metric acts on a single dataset and returns a result. A binary metric acts on a target and a reference dataset and returns a result. This is helpful to know if you decide that the included metrics aren&#8217;t sufficient. We&#8217;ve attempted to make adding a new metric as simple as possible. You simply create a new class that inherits from either the unary or binary base class and override the <cite>run</cite> function. At that point your metric will behave exactly like the included metrics in the toolkit. Below is an example of how one of the included metrics is implemented. If you need further assistance with your own metrics, be sure to email the project&#8217;s mailing list:</p>
+<div class="highlight-python"><div class="highlight"><pre><span class="gp">&gt;&gt;&gt; </span><span class="k">class</span> <span class="nc">Bias</span><span class="p">(</span><span class="n">BinaryMetric</span><span class="p">):</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="sd">&#39;&#39;&#39;Calculate the bias between a reference and target dataset.&#39;&#39;&#39;</span>
+<span class="go">&gt;&gt;&gt;</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="k">def</span> <span class="nf">run</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">ref_dataset</span><span class="p">,</span> <span class="n">target_dataset</span><span class="p">):</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="s">&#39;&#39;&#39;Calculate the bias between a reference and target dataset.</span>
+<span class="go">&gt;&gt;&gt;</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="o">..</span> <span class="n">note</span><span class="p">::</span>
+<span class="gp">&gt;&gt;&gt; </span>           <span class="n">Overrides</span> <span class="n">BinaryMetric</span><span class="o">.</span><span class="n">run</span><span class="p">()</span>
+<span class="go">&gt;&gt;&gt;</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="p">:</span><span class="n">param</span> <span class="n">ref_dataset</span><span class="p">:</span> <span class="n">The</span> <span class="n">reference</span> <span class="n">dataset</span> <span class="n">to</span> <span class="n">use</span> <span class="ow">in</span> <span class="n">this</span> <span class="n">metric</span> <span class="n">run</span><span class="o">.</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="p">:</span><span class="nb">type</span> <span class="n">ref_dataset</span><span class="p">:</span> <span class="n">ocw</span><span class="o">.</span><span class="n">dataset</span><span class="o">.</span><span class="n">Dataset</span> <span class="nb">object</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="p">:</span><span class="n">param</span> <span class="n">target_dataset</span><span class="p">:</span> <span class="n">The</span> <span class="n">target</span> <span class="n">dataset</span> <span class="n">to</span> <span class="n">evaluate</span> <span class="n">against</span> <span class="n">the</span>
+<span class="gp">&gt;&gt;&gt; </span>            <span class="n">reference</span> <span class="n">dataset</span> <span class="ow">in</span> <span class="n">this</span> <span class="n">metric</span> <span class="n">run</span><span class="o">.</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="p">:</span><span class="nb">type</span> <span class="n">target_dataset</span><span class="p">:</span> <span class="n">ocw</span><span class="o">.</span><span class="n">dataset</span><span class="o">.</span><span class="n">Dataset</span> <span class="nb">object</span>
+<span class="go">&gt;&gt;&gt;</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="p">:</span><span class="n">returns</span><span class="p">:</span> <span class="n">The</span> <span class="n">difference</span> <span class="n">between</span> <span class="n">the</span> <span class="n">reference</span> <span class="ow">and</span> <span class="n">target</span> <span class="n">datasets</span><span class="o">.</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="p">:</span><span class="n">rtype</span><span class="p">:</span> <span class="n">Numpy</span> <span class="n">Array</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="s">&#39;&#39;&#39;</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="s">        return ref_dataset.values - target_dataset.values</span>
+</pre></div>
+</div>
+<p>While this might look a bit scary at first, if we take out all the documentation you&#8217;ll see that it&#8217;s really quite simple:</p>
+<div class="highlight-python"><div class="highlight"><pre><span class="gp">&gt;&gt;&gt; </span><span class="c"># Our new Bias metric inherits from the Binary Metric base class</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="k">class</span> <span class="nc">Bias</span><span class="p">(</span><span class="n">BinaryMetric</span><span class="p">):</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="c"># Since our new metric is a binary metric we need to override</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="c"># the run funtion in the BinaryMetric base class.</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="k">def</span> <span class="nf">run</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">ref_dataset</span><span class="p">,</span> <span class="n">target_dataset</span><span class="p">):</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="c"># To implement the bias metric we simply return the difference</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="c"># between the reference and target dataset&#39;s values arrays.</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="k">return</span> <span class="n">ref_dataset</span><span class="o">.</span><span class="n">values</span> <span class="o">-</span> <span class="n">target_dataset</span><span class="o">.</span><span class="n">values</span>
+</pre></div>
+</div>
+<p>It is very important to note that you shouldn&#8217;t change the datasets that are passed into the metric that you&#8217;re implementing. If you do, you might cause unexpected results in later parts of the evaluation. If you need to do manipulations, copy the data first and perform the manipulations on the copy, as shown below. Leave the original dataset alone!</p>
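+<p>A sketch of that safe pattern inside a custom metric (<cite>SafeBias</cite> is illustrative, not part of the toolkit):</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; class SafeBias(BinaryMetric):
+&gt;&gt;&gt;     def run(self, ref_dataset, target_dataset):
+&gt;&gt;&gt;         # Work on copies so the original datasets stay untouched.
+&gt;&gt;&gt;         ref_values = ref_dataset.values.copy()
+&gt;&gt;&gt;         target_values = target_dataset.values.copy()
+&gt;&gt;&gt;         return ref_values - target_values
+</pre></div>
+</div>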
+</div>
+<div class="section" id="handling-an-evaluation">
+<h2>Handling an Evaluation<a class="headerlink" href="#handling-an-evaluation" title="Permalink to this headline">¶</a></h2>
+<p>We saw above that it is easy enough to run a metric over a few datasets manually. However, when we have many datasets and/or many metrics to run, that can become tedious and error prone. This is where the <a class="reference internal" href="evaluation.html#evaluation.Evaluation" title="evaluation.Evaluation"><code class="xref py py-class docutils literal"><span class="pre">evaluation.Evaluation</span></code></a> class comes in handy. It ensures that all the metrics that you choose are run over all combinations of the datasets that you input. Consider the following simple example:</p>
+<div class="highlight-python"><div class="highlight"><pre><span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">ocw.evaluation</span> <span class="kn">as</span> <span class="nn">eval</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">ocw.data_source.local</span> <span class="kn">as</span> <span class="nn">local</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">ocw.metrics</span> <span class="kn">as</span> <span class="nn">metrics</span>
+<span class="go">&gt;&gt;&gt;</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="c"># Load a few datasets</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">ref_dataset</span> <span class="o">=</span> <span class="n">local</span><span class="o">.</span><span class="n">load_file</span><span class="p">(</span><span class="o">...</span><span class="p">)</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">target1</span> <span class="o">=</span> <span class="n">local</span><span class="o">.</span><span class="n">load_file</span><span class="p">(</span><span class="o">...</span><span class="p">)</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">target2</span> <span class="o">=</span> <span class="n">local</span><span class="o">.</span><span class="n">load_file</span><span class="p">(</span><span class="o">...</span><span class="p">)</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">target_datasets</span> <span class="o">=</span> <span class="p">[</span><span class="n">target1</span><span class="p">,</span> <span class="n">target2</span><span class="p">]</span>
+<span class="go">&gt;&gt;&gt;</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="c"># Do some dataset manipulations here such as subsetting and regridding</span>
+<span class="go">&gt;&gt;&gt;</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="c"># Load a few metrics</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">bias</span> <span class="o">=</span> <span class="n">metrics</span><span class="o">.</span><span class="n">Bias</span><span class="p">()</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">tstd</span> <span class="o">=</span> <span class="n">metrics</span><span class="o">.</span><span class="n">TemporalStdDev</span><span class="p">()</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">metrics</span> <span class="o">=</span> <span class="p">[</span><span class="n">bias</span><span class="p">,</span> <span class="n">tstd</span><span class="p">]</span>
+<span class="go">&gt;&gt;&gt;</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">new_eval</span> <span class="o">=</span> <span class="nb">eval</span><span class="o">.</span><span class="n">Evaluation</span><span class="p">(</span><span class="n">ref_dataset</span><span class="p">,</span> <span class="n">target_datasets</span><span class="p">,</span> <span class="n">metrics</span><span class="p">)</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">new_eval</span><span class="o">.</span><span class="n">run</span><span class="p">()</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="k">print</span> <span class="n">new_eval</span><span class="o">.</span><span class="n">results</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="k">print</span> <span class="n">new_eval</span><span class="o">.</span><span class="n">unary_results</span>
+</pre></div>
+</div>
+<p>First we load all of our datasets and do any manipulations (which we leave out for brevity). Then we instantiate the metrics that we want to run, namely Bias and TemporalStdDev. Finally, we create our evaluation object:</p>
+<div class="highlight-python"><div class="highlight"><pre><span class="gp">&gt;&gt;&gt; </span><span class="n">new_eval</span> <span class="o">=</span> <span class="nb">eval</span><span class="o">.</span><span class="n">Evaluation</span><span class="p">(</span><span class="n">ref_dataset</span><span class="p">,</span> <span class="n">target_datasets</span><span class="p">,</span> <span class="n">metrics</span><span class="p">)</span>
+</pre></div>
+</div>
+<p>Notice two things about this. First, we&#8217;re splitting the datasets into a reference dataset (ref_dataset) and a list of target datasets (target_datasets). Second, one of the metrics that we loaded (<a class="reference internal" href="metrics.html#metrics.TemporalStdDev" title="metrics.TemporalStdDev"><code class="xref py py-class docutils literal"><span class="pre">metrics.TemporalStdDev</span></code></a>) is a unary metric. The reference/target dataset split is necessary for handling binary metrics. When an evaluation is run, all the binary metrics are run against every (reference, target) dataset pair. So the above evaluation could be replaced with the following calls. Of course this wouldn&#8217;t handle the unary metric, but we&#8217;ll get to that in a second:</p>
+<div class="highlight-python"><div class="highlight"><pre><span class="gp">&gt;&gt;&gt; </span><span class="n">result1</span> <span class="o">=</span> <span class="n">bias</span><span class="o">.</span><span class="n">run</span><span class="p">(</span><span class="n">ref_dataset</span><span class="p">,</span> <span class="n">target1</span><span class="p">)</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">result2</span> <span class="o">=</span> <span class="n">bias</span><span class="o">.</span><span class="n">run</span><span class="p">(</span><span class="n">ref_dataset</span><span class="p">,</span> <span class="n">target2</span><span class="p">)</span>
+</pre></div>
+</div>
+<p>Unary metrics are handled slightly differently but they&#8217;re still simple. Each unary metric passed into the evaluation is run against <em>every</em> dataset in the evaluation. So we could replace the above evaluation with the following calls:</p>
+<div class="highlight-python"><div class="highlight"><pre><span class="gp">&gt;&gt;&gt; </span><span class="n">unary_result1</span> <span class="o">=</span> <span class="n">tstd</span><span class="p">(</span><span class="n">ref_dataset</span><span class="p">)</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">unary_result2</span> <span class="o">=</span> <span class="n">tstd</span><span class="p">(</span><span class="n">target1</span><span class="p">)</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">unary_result3</span> <span class="o">=</span> <span class="n">tstd</span><span class="p">(</span><span class="n">target2</span><span class="p">)</span>
+</pre></div>
+</div>
+<p>The only other part that we need to explore to fully understand the <code class="xref py py-class docutils literal"><span class="pre">evaluation.Evaluation</span></code> class is how the results of a run are stored internally. The <cite>results</cite> list is a multidimensional list holding all the binary metric results, and <cite>unary_results</cite> is a list holding all the unary metric results. To replace the above evaluation more accurately with manual calls, we would write the following:</p>
+<div class="highlight-python"><div class="highlight"><pre><span class="gp">&gt;&gt;&gt; </span><span class="n">results</span> <span class="o">=</span> <span class="p">[</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="c"># Results for target1</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="p">[</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="n">bias</span><span class="o">.</span><span class="n">run</span><span class="p">(</span><span class="n">ref_dataset</span><span class="p">,</span> <span class="n">target1</span><span class="p">)</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="c"># If there were other binary metrics, the results would be here.</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="p">],</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="c"># Results for target2</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="p">[</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="n">bias</span><span class="o">.</span><span class="n">run</span><span class="p">(</span><span class="n">ref_dataset</span><span class="p">,</span> <span class="n">target2</span><span class="p">)</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="c"># If there were other binary metrics, the results would be here.</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="p">]</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="p">]</span>
+<span class="go">&gt;&gt;&gt;</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">unary_results</span> <span class="o">=</span> <span class="p">[</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="c"># Results for TemporalStdDev</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="p">[</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="n">tstd</span><span class="p">(</span><span class="n">ref_dataset</span><span class="p">),</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="n">tstd</span><span class="p">(</span><span class="n">target1</span><span class="p">),</span>
+<span class="gp">&gt;&gt;&gt; </span>        <span class="n">tstd</span><span class="p">(</span><span class="n">target2</span><span class="p">)</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="p">]</span>
+<span class="gp">&gt;&gt;&gt; </span>    <span class="c"># If there were other unary metrics, the results would be in a list here.</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="p">]</span>
+</pre></div>
+</div>
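+<p>Put another way, following the layout shown above, <cite>results</cite> is indexed by target dataset first and then by metric, while <cite>unary_results</cite> is indexed by metric first and then by dataset. As a quick sketch, assuming the evaluation above has been run:</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; # results[target_index][metric_index]: bias result for target2
+&gt;&gt;&gt; bias_for_target2 = new_eval.results[1][0]
+&gt;&gt;&gt;
+&gt;&gt;&gt; # unary_results[metric_index][dataset_index]: tstd over ref_dataset
+&gt;&gt;&gt; tstd_for_ref = new_eval.unary_results[0][0]
+</pre></div>
+</div>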
+</div>
+<div class="section" id="plotting">
+<h2>Plotting<a class="headerlink" href="#plotting" title="Permalink to this headline">¶</a></h2>
+<p>Plotting can be a fairly complicated business. Luckily we have <a class="reference external" href="https://cwiki.apache.org/confluence/display/CLIMATE/Guide+to+Plotting+API">pretty good documentation</a> on the project wiki that can help you out. There are also fairly simple examples in the project&#8217;s examples folder alongside the rest of the code, such as the following:</p>
+<div class="highlight-python"><div class="highlight"><pre><span class="gp">&gt;&gt;&gt; </span><span class="c"># Let&#39;s grab the values returned for bias.run(ref_dataset, target1)</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">results</span> <span class="o">=</span> <span class="n">bias_evaluation</span><span class="o">.</span><span class="n">results</span><span class="p">[</span><span class="mi">0</span><span class="p">][</span><span class="mi">0</span><span class="p">]</span>
+<span class="go">&gt;&gt;&gt;</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">Here</span><span class="s">&#39;s the same lat/lons we used earlier when we were re-gridding</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">lats</span> <span class="o">=</span> <span class="n">new_lats</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">lons</span> <span class="o">=</span> <span class="n">new_lons</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">fname</span> <span class="o">=</span> <span class="s">&#39;My_Test_Plot&#39;</span>
+<span class="go">&gt;&gt;&gt;</span>
+<span class="gp">&gt;&gt;&gt; </span><span class="n">plotter</span><span class="o">.</span><span class="n">draw_contour_map</span><span class="p">(</span><span class="n">results</span><span class="p">,</span> <span class="n">lats</span><span class="p">,</span> <span class="n">lons</span><span class="p">,</span> <span class="n">fname</span><span class="p">)</span>
+</pre></div>
+</div>
+<p>This would give you a contour map called <cite>My_Test_Plot</cite> for the requested bias metric run.</p>
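+<p>Note that <cite>draw_contour_map</cite> writes the plot to disk rather than displaying it interactively, so after the call above you should find the output image (e.g. <cite>My_Test_Plot.png</cite>, assuming the default PNG output format) in your working directory.</p>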
+</div>
+</div>
+
+
+          </div>
+        </div>
+      </div>
+      <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
+        <div class="sphinxsidebarwrapper">
+            <p class="logo"><a href="../index.html">
+              <img class="logo" src="../_static/ocw-logo-variant-sm-01-01-new.png" alt="Logo"/>
+            </a></p>
+  <h3><a href="../index.html">Table Of Contents</a></h3>
+  <ul>
+<li><a class="reference internal" href="#">Overview</a><ul>
+<li><a class="reference internal" href="#common-data-abstraction">Common Data Abstraction</a></li>
+<li><a class="reference internal" href="#data-sources">Data Sources</a></li>
+<li><a class="reference internal" href="#dataset-manipulations">Dataset Manipulations</a></li>
+<li><a class="reference internal" href="#metrics">Metrics</a></li>
+<li><a class="reference internal" href="#handling-an-evaluation">Handling an Evaluation</a></li>
+<li><a class="reference internal" href="#plotting">Plotting</a></li>
+</ul>
+</li>
+</ul>
+
+  <h4>Previous topic</h4>
+  <p class="topless"><a href="../index.html"
+                        title="previous chapter">Welcome to Apache Open Climate Workbench&#8217;s documentation!</a></p>
+  <h4>Next topic</h4>
+  <p class="topless"><a href="dataset.html"
+                        title="next chapter">Dataset Module</a></p>
+  <div role="note" aria-label="source link">
+    <h3>This Page</h3>
+    <ul class="this-page-menu">
+      <li><a href="../_sources/ocw/overview.txt"
+            rel="nofollow">Show Source</a></li>
+    </ul>
+   </div>
+<div id="searchbox" style="display: none" role="search">
+  <h3>Quick search</h3>
+    <form class="search" action="../search.html" method="get">
+      <input type="text" name="q" />
+      <input type="submit" value="Go" />
+      <input type="hidden" name="check_keywords" value="yes" />
+      <input type="hidden" name="area" value="default" />
+    </form>
+    <p class="searchtip" style="font-size: 90%">
+    Enter search terms or a module, class or function name.
+    </p>
+</div>
+<script type="text/javascript">$('#searchbox').show(0);</script>
+        </div>
+      </div>
+      <div class="clearer"></div>
+    </div>
+    <div class="footer">
+      &copy;2016, Apache Software Foundation.
+      
+      |
+      Powered by <a href="http://sphinx-doc.org/">Sphinx 1.3.1</a>
+      &amp; <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.4</a>
+      
+      |
+      <a href="../_sources/ocw/overview.txt"
+          rel="nofollow">Page source</a></li>
+    </div>
+
+    
+
+    
+  </body>
+</html>
\ No newline at end of file