CLIMATE-581 - Add tests for metric config output API
Project: http://git-wip-us.apache.org/repos/asf/climate/repo Commit: http://git-wip-us.apache.org/repos/asf/climate/commit/e3dcace6 Tree: http://git-wip-us.apache.org/repos/asf/climate/tree/e3dcace6 Diff: http://git-wip-us.apache.org/repos/asf/climate/diff/e3dcace6 Branch: refs/heads/master Commit: e3dcace61db9840adf6312754b67633971915dca Parents: 0df1546 Author: Michael Joyce <[email protected]> Authored: Tue Mar 24 16:03:42 2015 -0700 Committer: Michael Joyce <[email protected]> Committed: Tue Mar 24 16:03:42 2015 -0700 ---------------------------------------------------------------------- ocw-config-runner/tests/test_config_writer.py | 33 ++++++++++++++++++++++ 1 file changed, 33 insertions(+) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/climate/blob/e3dcace6/ocw-config-runner/tests/test_config_writer.py ---------------------------------------------------------------------- diff --git a/ocw-config-runner/tests/test_config_writer.py b/ocw-config-runner/tests/test_config_writer.py index ebae0de..b76b0b7 100644 --- a/ocw-config-runner/tests/test_config_writer.py +++ b/ocw-config-runner/tests/test_config_writer.py @@ -19,6 +19,8 @@ from mock import patch import unittest from ocw.dataset import Dataset +from ocw.evaluation import Evaluation +import ocw.metrics as metrics import configuration_writer as writer import datetime as dt @@ -268,3 +270,34 @@ class TestDAPDatasetExportGeneration(unittest.TestCase): def test_proper_units_name_export(self): self.assertEqual(self.exported_info['optional_args']['units'], self.units) + + +class TestMetricExportGeneration(unittest.TestCase): + @classmethod + def setUpClass(self): + self.bias = metrics.Bias() + self.tmp_std_dev = metrics.TemporalStdDev() + loaded_metrics = [self.bias, self.tmp_std_dev] + + self.evaluation = Evaluation(None, [], loaded_metrics) + + def test_proper_export_format(self): + out = writer.generate_metric_information(self.evaluation) + + 
+        self.assertTrue(type(out) == type(list()))
+
+        for name in out:
+            self.assertTrue(type(name) == type(str()))
+
+    def test_proper_metric_name_export(self):
+        out = writer.generate_metric_information(self.evaluation)
+
+        self.assertTrue(self.bias.__class__.__name__ in out)
+        self.assertTrue(self.tmp_std_dev.__class__.__name__ in out)
+
+    def test_empty_metrics_in_evaluation(self):
+        new_eval = Evaluation(None, [], [])
+        out = writer.generate_metric_information(new_eval)
+
+        self.assertTrue(type(out) == type(list()))
+        self.assertTrue(len(out) == 0)
