Posted to commits@climate.apache.org by jo...@apache.org on 2015/03/31 16:59:30 UTC

[11/13] climate git commit: CLIMATE-581 - Fix minor trailing/leading whitespace issues

CLIMATE-581 - Fix minor trailing/leading whitespace issues


Project: http://git-wip-us.apache.org/repos/asf/climate/repo
Commit: http://git-wip-us.apache.org/repos/asf/climate/commit/9167f1a5
Tree: http://git-wip-us.apache.org/repos/asf/climate/tree/9167f1a5
Diff: http://git-wip-us.apache.org/repos/asf/climate/diff/9167f1a5

Branch: refs/heads/master
Commit: 9167f1a5c8366ecda77693a61c49f345ce2c41d8
Parents: f83e4be
Author: Michael Joyce <jo...@apache.org>
Authored: Wed Mar 25 11:33:22 2015 -0700
Committer: Michael Joyce <jo...@apache.org>
Committed: Wed Mar 25 11:33:22 2015 -0700

----------------------------------------------------------------------
 ocw-config-runner/configuration_writer.py     |  6 +++---
 ocw-config-runner/tests/test_config_writer.py | 12 ++++++------
 2 files changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/climate/blob/9167f1a5/ocw-config-runner/configuration_writer.py
----------------------------------------------------------------------
diff --git a/ocw-config-runner/configuration_writer.py b/ocw-config-runner/configuration_writer.py
index c78f46f..f7fb1c9 100644
--- a/ocw-config-runner/configuration_writer.py
+++ b/ocw-config-runner/configuration_writer.py
@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)
 
 def export_evaluation_to_config(evaluation, file_path='./exported_eval.yaml'):
     ''' Export an evaluation to a config file
-    
+
     :param evaluation: The evaluation object to export.
     :type evaluation: :class:`evaluation.Evaluation`
 
@@ -42,7 +42,7 @@ def export_evaluation_to_config(evaluation, file_path='./exported_eval.yaml'):
 
 def generate_dataset_information(evaluation):
     ''' Generate dataset config file output for a given Evaluation object.
-    
+
     :param evaluation: The evaluation object from which to extract metrics.
     :type evaluation: :class:`evaluation.Evaluation`
 
@@ -124,7 +124,7 @@ def generate_evaluation_information(evaluation):
     configuration information. It's possible that you will encounter a scenario
     where the guessed values are not what you want/expect. Please double
     check the output before blinding trusting what this generates.
-    
+
     :param evaluation: The evaluation object from which to extract metrics.
     :type evaluation: :class:`evaluation.Evaluation`
 

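For context, the docstring lines touched above belong to configuration_writer.export_evaluation_to_config(), which serializes an OCW Evaluation object to a YAML config file (defaulting to ./exported_eval.yaml, per the signature in the diff). A minimal usage sketch follows; the Dataset, Evaluation, and Bias construction and the flat "import configuration_writer" path are assumptions based on the wider ocw API and the test file, not part of this commit.

    # Hypothetical usage sketch for export_evaluation_to_config().
    # The Dataset/Evaluation/Bias setup below assumes the standard ocw API and
    # that this runs from the ocw-config-runner directory; it is not shown in
    # this commit.
    import datetime

    import numpy as np
    from ocw.dataset import Dataset
    from ocw.evaluation import Evaluation
    from ocw.metrics import Bias

    import configuration_writer as writer

    lats = np.arange(-45, 45, 1)
    lons = np.arange(-90, 90, 1)
    times = np.array([datetime.datetime(2000, m, 1) for m in range(1, 13)])
    values = np.zeros((len(times), len(lats), len(lons)))

    reference = Dataset(lats, lons, times, values, name='reference')
    target = Dataset(lats, lons, times, values, name='target')

    evaluation = Evaluation(reference, [target], [Bias()])

    # Write the evaluation out as a YAML config file.
    writer.export_evaluation_to_config(evaluation,
                                       file_path='./exported_eval.yaml')
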
http://git-wip-us.apache.org/repos/asf/climate/blob/9167f1a5/ocw-config-runner/tests/test_config_writer.py
----------------------------------------------------------------------
diff --git a/ocw-config-runner/tests/test_config_writer.py b/ocw-config-runner/tests/test_config_writer.py
index 2d6a9f6..97b7e01 100644
--- a/ocw-config-runner/tests/test_config_writer.py
+++ b/ocw-config-runner/tests/test_config_writer.py
@@ -293,7 +293,7 @@ class TestDatasetExportFromEvaluation(unittest.TestCase):
             'time_name': 'a time name',
             'elevation_index': 2
         }
-        
+
         self.rcmed_origin = {
             'source': 'rcmed',
             'dataset_id': 4,
@@ -332,7 +332,7 @@ class TestDatasetExportFromEvaluation(unittest.TestCase):
             name=self.name,
             origin=self.rcmed_origin
         )
-        
+
         self.esgf_ds = Dataset(
             self.lats,
             self.lons,
@@ -589,7 +589,7 @@ class FullExportTest(unittest.TestCase):
             'time_name': 'a time name',
             'elevation_index': 2
         }
-        
+
         self.rcmed_origin = {
             'source': 'rcmed',
             'dataset_id': 4,
@@ -628,7 +628,7 @@ class FullExportTest(unittest.TestCase):
             name=self.name,
             origin=self.rcmed_origin
         )
-        
+
         self.esgf_ds = Dataset(
             self.lats,
             self.lons,
@@ -690,7 +690,7 @@ class FullExportTest(unittest.TestCase):
             self.assertTrue(metric.__class__.__name__ in data['metrics'])
 
         total_eval_metrics = (
-            len(self.evaluation.metrics) + 
+            len(self.evaluation.metrics) +
             len(self.evaluation.unary_metrics)
         )
 
@@ -710,7 +710,7 @@ class FullExportTest(unittest.TestCase):
         self.assertTrue('targets' in data['datasets'])
 
         self.assertAlmostEqual(
-            writer.generate_dataset_information(self.evaluation), 
+            writer.generate_dataset_information(self.evaluation),
             data['datasets']
         )