CLIMATE-883 - Ensure python 3 builds pass unit tests

Project: http://git-wip-us.apache.org/repos/asf/climate/repo
Commit: http://git-wip-us.apache.org/repos/asf/climate/commit/528fe168
Tree: http://git-wip-us.apache.org/repos/asf/climate/tree/528fe168
Diff: http://git-wip-us.apache.org/repos/asf/climate/diff/528fe168

Branch: refs/heads/master
Commit: 528fe168c007ca3509f23f0bec9157fc0b2801be
Parents: 088706d
Author: Alex Goodman <ago...@users.noreply.github.com>
Authored: Fri Oct 21 16:47:49 2016 -0700
Committer: Alex Goodman <ago...@users.noreply.github.com>
Committed: Fri Oct 21 17:28:22 2016 -0700

----------------------------------------------------------------------
 ocw/data_source/rcmed.py            |  35 ++++++++-----
 ocw/dataset_processor.py            |  12 ++---
 ocw/evaluation.py                   |  24 ++++-----
 ocw/plotter.py                      |   6 +--
 ocw/tests/parameter_values.npy      | Bin 0 -> 67280 bytes
 ocw/tests/parameters_values.p       |  44 ----------------
 ocw/tests/test_dataset.py           |   6 +--
 ocw/tests/test_dataset_processor.py |   8 +--
 ocw/tests/test_local.py             |  85 ++++++++++++++++---------------
 ocw/tests/test_plotter.py           |   4 +-
 ocw/tests/test_rcmed.py             |  58 +++++++++++----------
 ocw/utils.py                        |  25 +++++++--
 12 files changed, 149 insertions(+), 158 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/climate/blob/528fe168/ocw/data_source/rcmed.py
----------------------------------------------------------------------
diff --git a/ocw/data_source/rcmed.py b/ocw/data_source/rcmed.py
index 82b4c29..9c5ad59 100644
--- a/ocw/data_source/rcmed.py
+++ b/ocw/data_source/rcmed.py
@@ -21,9 +21,14 @@ Classes:
     More information about the RCMED Query Specification can be found below:
     https://rcmes.jpl.nasa.gov/query-api/query.php?
 '''
+# Needed for Python 2/3 urllib compatibility
+try:
+    from urllib.parse import urlencode
+    from urllib.request import urlopen
+except ImportError:
+    from urllib import urlencode
+    from urllib2 import urlopen
 
-import urllib
-import urllib2
 import re
 import json
 import numpy as np
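
The import fallback above is the stdlib-only idiom for straddling Python 2 and 3: try the Python 3 locations first and fall back on ImportError. A minimal standalone sketch (the query URL is hypothetical):

    # Python 3 moved urllib2.urlopen into urllib.request and
    # urllib.urlencode into urllib.parse.
    try:
        from urllib.parse import urlencode
        from urllib.request import urlopen
    except ImportError:  # Python 2
        from urllib import urlencode
        from urllib2 import urlopen

    query = urlencode([('param_info', 'yes')])
    response = urlopen('https://example.org/query.php?' + query)
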
@@ -45,8 +50,8 @@ def get_parameters_metadata():
 
     param_info_list = []
     url = URL + "&param_info=yes"
-    string = urllib2.urlopen(url)
-    data_string = string.read()
+    string = urlopen(url)
+    data_string = string.read().decode('utf-8')
     json_format_data = json.loads(data_string)
     fields_name = json_format_data['fields_name']
     data = json_format_data['data']
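
The added .decode('utf-8') is what keeps json.loads working: under Python 3, urlopen(...).read() returns bytes, and json.loads only accepted str at the time (bytes support arrived in Python 3.6). A sketch with a made-up payload:

    import json

    payload = b'{"fields_name": [], "data": []}'  # what .read() yields on Python 3
    json_format_data = json.loads(payload.decode('utf-8'))  # works on 2 and 3
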
@@ -74,7 +79,7 @@ def _make_mask_array(values, parameter_id, parameters_metadata):
     '''
 
     for each in parameters_metadata:
-        if each['parameter_id'].encode() == str(parameter_id):
+        if str(each['parameter_id']) == str(parameter_id):
             missing_values = each['missingdataflag'].encode()
             break
     missing_values = float(missing_values)
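
The comparison rewrite matters because bytes and str never compare equal on Python 3, so the old .encode() == str(...) test was always False there; coercing both sides with str() behaves the same on either interpreter. With hypothetical values:

    record = {'parameter_id': '38'}  # hypothetical metadata entry
    parameter_id = 38
    record['parameter_id'].encode() == str(parameter_id)  # False on Python 3: b'38' vs '38'
    str(record['parameter_id']) == str(parameter_id)      # True on both versions
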
@@ -89,7 +95,7 @@ def _reshape_values(values, unique_values):
     :param values: Raw values data
     :type values: numpy array
     :param unique_values: Tuple of unique latitudes, longitudes and times data.
-    :type unique_values: Tuple 
+    :type unique_values: Tuple
 
     :returns: Reshaped values data
     :rtype: Numpy array
@@ -118,7 +124,8 @@ def _calculate_time(unique_times, time_step):
 
     time_format = "%Y-%m-%d %H:%M:%S"
     unique_times = np.array(
-        [datetime.strptime(time, time_format) for time in unique_times])
+        [datetime.strptime(time.decode('utf-8'), time_format)
+                           for time in unique_times])
     # There is no need to sort time.
     # This function may required still in RCMES
     # unique_times.sort()
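
The decode before strptime is needed because the time strings are split out of an undecoded bytes response, and datetime.strptime only accepts str on Python 3. A one-value sketch:

    from datetime import datetime

    raw = b'1989-01-01 00:00:00'  # as it arrives from the bytes payload
    # datetime.strptime(raw, ...) raises TypeError on Python 3; decode first:
    parsed = datetime.strptime(raw.decode('utf-8'), "%Y-%m-%d %H:%M:%S")
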
@@ -158,11 +165,11 @@ def _get_data(url):
     :rtype: (Numpy array, Numpy array, Numpy array, Numpy array)
     '''
 
-    string = urllib2.urlopen(url)
+    string = urlopen(url)
     data_string = string.read()
-    index_of_data = re.search('data: \r\n', data_string)
+    index_of_data = re.search(b'data: \r\n', data_string)
     data = data_string[index_of_data.end():len(data_string)]
-    data = data.split('\r\n')
+    data = data.split(b'\r\n')
 
     lats = []
     lons = []
@@ -172,7 +179,7 @@ def _get_data(url):
 
     # Because the last row is empty, "len(data)-1" is used.
     for i in range(len(data) - 1):
-        row = data[i].split(',')
+        row = data[i].split(b',')
         lats.append(np.float32(row[0]))
         lons.append(np.float32(row[1]))
         # Level is not currently supported in Dataset class.
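
Since _get_data never decodes the response body, the pattern and separator literals have to become bytes as well: on Python 3, re.search with a str pattern against bytes data, or bytes.split with a str separator, raises TypeError. A condensed sketch with a fabricated payload:

    import re

    body = b'data: \r\n40.0,-105.0,,1989-01-01 00:00:00,273.1\r\n'
    start = re.search(b'data: \r\n', body).end()  # bytes pattern on bytes data
    rows = body[start:].split(b'\r\n')            # [b'40.0,...', b'']
    fields = rows[0].split(b',')                  # bytes fields; np.float32 accepts them
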
@@ -248,7 +255,7 @@ def _generate_query_url(dataset_id, parameter_id, min_lat, max_lat, min_lon, max
     :type max_lon: Float
     :param start_time: Start time
     :type start_time: Datetime
-    :param end_time: End time 
+    :param end_time: End time
     :type end_time: Datetime
     :param time_step: Time step
     :type time_step: String
@@ -265,7 +272,7 @@ def _generate_query_url(dataset_id, parameter_id, min_lat, max_lat, min_lon, max
     query = [('datasetId', dataset_id), ('parameterId', parameter_id), ('latMin', min_lat), ('latMax', max_lat),
              ('lonMin', min_lon), ('lonMax', max_lon), ('timeStart', start_time), ('timeEnd', end_time)]
 
-    query_url = urllib.urlencode(query)
+    query_url = urlencode(query)
     url_request = URL + query_url
 
     return url_request
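
Only the import location of urlencode changed, not its behavior; it flattens the (key, value) pairs into a query string the same way on both interpreters. For reference, with hypothetical values:

    pairs = [('datasetId', 1), ('parameterId', 38), ('latMin', -45.0)]
    urlencode(pairs)  # -> 'datasetId=1&parameterId=38&latMin=-45.0'
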
@@ -320,7 +327,7 @@ def parameter_dataset(dataset_id, parameter_id, min_lat, max_lat, min_lon, max_l
     :param start_time: Start time
     :type start_time: :class:`datetime.datetime`
 
-    :param end_time: End time 
+    :param end_time: End time
     :type end_time: :class:`datetime.datetime`
 
     :param name: (Optional) A name for the loaded dataset.

http://git-wip-us.apache.org/repos/asf/climate/blob/528fe168/ocw/dataset_processor.py
----------------------------------------------------------------------
diff --git a/ocw/dataset_processor.py b/ocw/dataset_processor.py
index 5ec0de4..6579677 100755
--- a/ocw/dataset_processor.py
+++ b/ocw/dataset_processor.py
@@ -54,8 +54,8 @@ def temporal_subset(target_dataset, month_start, month_end,
     """
 
     if month_start > month_end:
-        month_index = range(month_start, 13)
-        month_index.extend(range(1, month_end + 1))
+        month_index = list(range(month_start, 13))
+        month_index.extend(list(range(1, month_end + 1)))
     else:
         month_index = range(month_start, month_end + 1)
 
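
The list() wrapper is required because Python 3's range is a lazy sequence with no extend method, whereas Python 2's returned a plain list. A sketch for a wrap-around December-to-February subset:

    month_start, month_end = 12, 2  # hypothetical wrap-around season
    month_index = list(range(month_start, 13))   # [12]
    month_index.extend(range(1, month_end + 1))  # [12, 1, 2]

The inner list() around the extend argument in the patch is harmless but not strictly needed, since extend accepts any iterable.
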
@@ -150,7 +150,7 @@ def temporal_rebin_with_time_index(target_dataset, nt_average):
                'be a multiple of nt_average')
         print(msg)
     # nt2 is the length of time dimension in the rebinned dataset
-    nt2 = nt / nt_average
+    nt2 = nt // nt_average
     binned_dates = target_dataset.times[np.arange(nt2) * nt_average]
     binned_values = ma.zeros(
         np.insert(target_dataset.values.shape[1:], 0, nt2))
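
These / to // changes are all the same Python 3 issue: / is now true division, so nt / nt_average yields a float that can no longer be used as a length or index. Floor division reproduces the old integral result on both versions:

    nt, nt_average = 120, 12
    nt / nt_average   # 10.0 on Python 3 (float), 10 on Python 2
    nt // nt_average  # 10 on both; safe for array shapes and indexing
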
@@ -1069,7 +1069,7 @@ def _rcmes_calc_average_on_new_time_unit(data, dates, unit):
     nt, ny, nx = data.shape
     if unit == 'full':
         new_data = ma.mean(data, axis=0)
-        new_date = [dates[dates.size / 2]]
+        new_date = [dates[dates.size // 2]]
     if unit == 'annual':
         years = [d.year for d in dates]
         years_sorted = np.unique(years)
@@ -1108,8 +1108,8 @@ def _rcmes_calc_average_on_new_time_unit(data, dates, unit):
         for day in days_sorted:
             index = np.where(days == day)[0]
             new_data[it, :] = ma.mean(data[index, :], axis=0)
-            y = int(day / 10000)
-            m = int(day % 10000) / 100
+            y = int(day // 10000)
+            m = int(day % 10000) // 100
             d = int(day % 100)
             new_date.append(datetime.datetime(year=y, month=m, day=d))
             it = it + 1

http://git-wip-us.apache.org/repos/asf/climate/blob/528fe168/ocw/evaluation.py
----------------------------------------------------------------------
diff --git a/ocw/evaluation.py b/ocw/evaluation.py
index 51c2ead..6cb8f8f 100644
--- a/ocw/evaluation.py
+++ b/ocw/evaluation.py
@@ -16,13 +16,13 @@
 # under the License.
 
 '''
-Classes: 
+Classes:
     Evaluation - Container for running an evaluation
 '''
 
 import logging
-from metrics import Metric, UnaryMetric, BinaryMetric
-from dataset import Dataset, Bounds
+from ocw.metrics import Metric, UnaryMetric, BinaryMetric
+from ocw.dataset import Dataset, Bounds
 import ocw.dataset_processor as DSP
 
 import numpy.ma as ma
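
The qualified imports address another Python 3 removal: implicit relative imports are gone, so "from metrics import ..." no longer resolves to the sibling module inside the ocw package. In sketch form:

    # Implicit relative import, Python 2 only:
    #     from metrics import Metric
    # Absolute import, valid on both interpreters:
    from ocw.metrics import Metric
    # An explicit relative import would also work from inside the package:
    #     from .metrics import Metric
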
@@ -33,14 +33,14 @@ logger = logging.getLogger(__name__)
 class Evaluation(object):
     '''Container for running an evaluation
 
-    An *Evaluation* is the running of one or more metrics on one or more 
+    An *Evaluation* is the running of one or more metrics on one or more
     target datasets and a (possibly optional) reference dataset. Evaluation
     can handle two types of metrics, ``unary`` and ``binary``. The validity
     of an Evaluation is dependent upon the number and type of metrics as well
     as the number of datasets.
 
     A ``unary`` metric is a metric that runs over a single dataset. If you add
-    a ``unary`` metric to the Evaluation you are only required to add a 
+    a ``unary`` metric to the Evaluation you are only required to add a
     reference dataset or a target dataset. If there are multiple datasets
     in the evaluation then the ``unary`` metric is run over all of them.
 
@@ -50,7 +50,7 @@ class Evaluation(object):
     are run over every (reference dataset, target dataset) pair in the
     Evaluation.
 
-    An Evaluation must have at least one metric to be valid. 
+    An Evaluation must have at least one metric to be valid.
     '''
 
     def __init__(self, reference, targets, metrics, subregions=None):
@@ -59,11 +59,11 @@ class Evaluation(object):
         :param reference: The reference Dataset for the evaluation.
         :type reference: :class:`dataset.Dataset`
 
-        :param targets: A list of one or more target datasets for the 
+        :param targets: A list of one or more target datasets for the
                 evaluation.
         :type targets: :class:`list` of :class:`dataset.Dataset`
 
-        :param metrics: A list of one or more Metric instances to run 
+        :param metrics: A list of one or more Metric instances to run
                 in the evaluation.
         :type metrics: :class:`list` of :mod:`metrics`
 
@@ -71,7 +71,7 @@ class Evaluation(object):
                 evaluation. A subregion is specified with a Bounds object.
         :type subregions: :class:`list` of :class:`dataset.Bounds`
 
-        :raises: ValueError 
+        :raises: ValueError
         '''
         #: The reference dataset.
         self._ref_dataset = reference
@@ -140,7 +140,7 @@ class Evaluation(object):
     def add_dataset(self, target_dataset):
         '''Add a Dataset to the Evaluation.
 
-        A target Dataset is compared against the reference dataset when the 
+        A target Dataset is compared against the reference dataset when the
         Evaluation is run with one or more metrics.
 
         :param target_dataset: The target Dataset to add to the Evaluation.
@@ -161,7 +161,7 @@ class Evaluation(object):
     def add_datasets(self, target_datasets):
         '''Add multiple Datasets to the Evaluation.
 
-        :param target_datasets: The list of datasets that should be added to 
+        :param target_datasets: The list of datasets that should be added to
             the Evaluation.
         :type target_datasets: :class:`list` of :class:`dataset.Dataset`
 
@@ -217,7 +217,7 @@ class Evaluation(object):
         target dataset.
 
         If there is subregion information provided then each dataset is subset
-        before being run through the binary metrics. 
+        before being run through the binary metrics.
 
         ..note:: Only the binary metrics are subset with subregion information.
 

http://git-wip-us.apache.org/repos/asf/climate/blob/528fe168/ocw/plotter.py
----------------------------------------------------------------------
diff --git a/ocw/plotter.py b/ocw/plotter.py
index aedba6e..dafa8ad 100755
--- a/ocw/plotter.py
+++ b/ocw/plotter.py
@@ -146,10 +146,10 @@ def _fig_size(gridshape, aspect=None):
     nrows, ncols = gridshape
     if nrows >= ncols:
         # If more rows keep width constant
-        width, height = (aspect * 5.5), 5.5 * (nrows / ncols)
+        width, height = (aspect * 5.5), 5.5 * (nrows // ncols)
     else:
         # If more columns keep height constant
-        width, height = (aspect * 5.5) * (ncols / nrows), 5.5
+        width, height = (aspect * 5.5) * (ncols // nrows), 5.5
 
     return width, height
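
Worth noting: // here keeps Python 2's truncating grid ratio rather than adopting true division, presumably so figure sizes stay identical across versions. For a hypothetical 3x2 grid:

    nrows, ncols = 3, 2
    5.5 * (nrows // ncols)  # 5.5, same as Python 2's 5.5 * (3 / 2)
    5.5 * (nrows / ncols)   # 8.25 under Python 3's true division
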
 
@@ -509,7 +509,7 @@ def draw_barchart(results, yvalues, fname, ptitle='', fmt='png',
     :type results: :class:`numpy.ndarray`
 
     :param yvalues: List of y-axis labels
-    :type times: :class:`list` 
+    :type yvalues: :class:`list`
 
     :param fname: Filename of the plot.
     :type fname: :mod:`string`

http://git-wip-us.apache.org/repos/asf/climate/blob/528fe168/ocw/tests/parameter_values.npy
----------------------------------------------------------------------
diff --git a/ocw/tests/parameter_values.npy b/ocw/tests/parameter_values.npy
new file mode 100644
index 0000000..2eb12f0
Binary files /dev/null and b/ocw/tests/parameter_values.npy differ
