Revision: 6010
http://matplotlib.svn.sourceforge.net/matplotlib/?rev=6010&view=rev
Author: jswhit
Date: 2008-08-09 12:57:54 +0000 (Sat, 09 Aug 2008)
Log Message:
-----------
update to pupynere 1.0.2
Modified Paths:
--------------
trunk/toolkits/basemap/lib/mpl_toolkits/basemap/__init__.py
trunk/toolkits/basemap/lib/mpl_toolkits/basemap/netcdf.py
trunk/toolkits/basemap/lib/mpl_toolkits/basemap/pupynere.py
Modified: trunk/toolkits/basemap/lib/mpl_toolkits/basemap/__init__.py
===================================================================
--- trunk/toolkits/basemap/lib/mpl_toolkits/basemap/__init__.py 2008-08-08
20:52:12 UTC (rev 6009)
+++ trunk/toolkits/basemap/lib/mpl_toolkits/basemap/__init__.py 2008-08-09
12:57:54 UTC (rev 6010)
@@ -3641,19 +3641,25 @@
return corners
def NetCDFFile(file, mode='r', maskandscale=True):
- """NetCDF File reader. API is the same as Scientific.IO.NetCDF.
+ """NetCDF File reader/writer. API is the same as Scientific.IO.NetCDF.
+
If ``file`` is a URL that starts with `http`, it is assumed
- to be a remote OPenDAP dataset, and the python dap client is used
+ to be a remote OPeNDAP dataset, and pydap is used
to retrieve the data. Only the OPeNDAP Array and Grid data
types are recognized. If file does not start with `http`, it
- is assumed to be a local netCDF file. Data will
- automatically be converted to masked arrays if the variable has either
- a ``missing_value`` or ``_FillValue`` attribute, and some data points
- are equal to the value specified by that attribute. In addition,
- variables stored as integers that have the ``scale_factor`` and
- ``add_offset`` attribute will automatically be rescaled to floats when
- read. To suppress these automatic conversions, set the
- ``maskandscale`` keyword to False.
+ is assumed to be a local netCDF file and is read
+ with scipy.io.netcdf. Both pydap and scipy.io.netcdf are written
+ by Roberto De Almeida.
+
+ Data will
+ automatically be converted to and from masked arrays if the variable
+ has either a ``missing_value`` or ``_FillValue`` attribute, and
+ some data points are equal to the value specified by that attribute.
+ In addition, variables that have the ``scale_factor`` and ``add_offset``
+ attribute will automatically be converted to and from short integers.
+ To suppress these automatic conversions, set the ``maskandscale``
+ keyword to False.
+
"""
if file.startswith('http'):
return netcdf._RemoteFile(file,maskandscale=maskandscale)
Modified: trunk/toolkits/basemap/lib/mpl_toolkits/basemap/netcdf.py
===================================================================
--- trunk/toolkits/basemap/lib/mpl_toolkits/basemap/netcdf.py 2008-08-08
20:52:12 UTC (rev 6009)
+++ trunk/toolkits/basemap/lib/mpl_toolkits/basemap/netcdf.py 2008-08-09
12:57:54 UTC (rev 6010)
@@ -1,5 +1,5 @@
from numpy import ma, squeeze
-from pupynere import netcdf_file, _maskandscale
+from pupynere import netcdf_file, _unmaskandscale
from dap.client import open as open_remote
from dap.dtypes import ArrayType, GridType, typemap
@@ -68,7 +68,7 @@
# - create a masked array using missing_value or _FillValue attribute
# - apply scale_factor and add_offset to packed integer data
if self._maskandscale:
- return _maskandscale(self,datout)
+ return _unmaskandscale(self,datout)
else:
return datout
Modified: trunk/toolkits/basemap/lib/mpl_toolkits/basemap/pupynere.py
===================================================================
--- trunk/toolkits/basemap/lib/mpl_toolkits/basemap/pupynere.py 2008-08-08
20:52:12 UTC (rev 6009)
+++ trunk/toolkits/basemap/lib/mpl_toolkits/basemap/pupynere.py 2008-08-09
12:57:54 UTC (rev 6010)
@@ -128,9 +128,7 @@
"""
def __init__(self, filename, mode='r', maskandscale=False):
-
self._maskandscale = maskandscale
-
self.filename = filename
assert mode in 'rw', "Mode must be either 'r' or 'w'."
@@ -181,7 +179,7 @@
if size > 1: dtype_ += str(size)
data = empty(shape_, dtype=dtype_)
- self.variables[name] = netcdf_variable(data, typecode, shape,
dimensions)
+ self.variables[name] = netcdf_variable(data, typecode, shape,
dimensions, maskandscale=self._maskandscale)
return self.variables[name]
def flush(self):
@@ -204,7 +202,7 @@
def _write_numrecs(self):
# Get highest record count from all record variables.
for var in self.variables.values():
- if not var.shape[0] and len(var.data) > self._recs:
+ if not var._shape[0] and len(var.data) > self._recs:
self.__dict__['_recs'] = len(var.data)
self._pack_int(self._recs)
@@ -238,7 +236,7 @@
# Sort variables non-recs first, then recs.
variables = self.variables.items()
- variables.sort(key=lambda (k, v): v.shape and v.shape[0] is not
None)
+ variables.sort(key=lambda (k, v): v._shape and v._shape[0] is not
None)
variables.reverse()
variables = [k for (k, v) in variables]
@@ -249,7 +247,7 @@
# each record variable, so we can calculate recsize.
self.__dict__['_recsize'] = sum([
var._vsize for var in self.variables.values()
- if var.shape[0] is None])
+ if var._shape[0] is None])
# Set the data for all variables.
for name in variables:
self._write_var_data(name)
@@ -270,13 +268,13 @@
nc_type = REVERSE[var.typecode()]
self.fp.write(nc_type)
- if var.shape[0]:
+ if var._shape[0]:
vsize = var.data.size * var.data.itemsize
vsize += -vsize % 4
else: # record variable
vsize = var.data[0].size * var.data.itemsize
rec_vars = len([var for var in self.variables.values()
- if var.shape[0] is None])
+ if var._shape[0] is None])
if rec_vars > 1:
vsize += -vsize % 4
self.variables[name].__dict__['_vsize'] = vsize
@@ -296,7 +294,7 @@
self.fp.seek(the_beguine)
# Write data.
- if var.shape[0]:
+ if var._shape[0]:
self.fp.write(var.data.tostring())
count = var.data.size * var.data.itemsize
self.fp.write('0' * (var._vsize - count))
@@ -413,7 +411,7 @@
# Add variable.
self.variables[name] = netcdf_variable(
- data, typecode, shape, dimensions, attributes)
+ data, typecode, shape, dimensions, attributes,
maskandscale=self._maskandscale)
if rec_vars:
# Remove padding when only one record variable.
@@ -527,7 +525,7 @@
attribute of the ``netcdf_variable`` object.
"""
- def __init__(self, data, typecode, shape, dimensions, attributes=None,
maskandscale=True):
+ def __init__(self, data, typecode, shape, dimensions, attributes=None,
maskandscale=False):
self.data = data
self._typecode = typecode
self._shape = shape
@@ -561,13 +559,15 @@
return self._typecode
def __getitem__(self, index):
- datout = squeeze(self.data[index])
+ data = squeeze(self.data[index])
if self._maskandscale:
- return _maskandscale(self,datout)
+ return _unmaskandscale(self,data)
else:
- return datout
+ return data
def __setitem__(self, index, data):
+ if self._maskandscale:
+ data = _maskandscale(self,data)
# Expand data for record vars?
if not self._shape[0]:
if isinstance(index, tuple):
@@ -600,24 +600,55 @@
'q':-2147483647L,
'f':9.9692099683868690e+36,
'd':9.9692099683868690e+36}
-def _maskandscale(var,datout):
- totalmask = zeros(datout.shape,bool)
- fillval = None
- if hasattr(var, 'missing_value') and (datout == var.missing_value).any():
- fillval = var.missing_value
- totalmask += datout==fillval
- if hasattr(var, '_FillValue') and (datout == var._FillValue).any():
- if fillval is None:
+def _unmaskandscale(var,data):
+ # if _maskandscale mode set to True, perform
+ # automatic unpacking using scale_factor/add_offset
+ # and automatic conversion to masked array using
+ # missing_value/_FillValue.
+ totalmask = zeros(data.shape, bool)
+ fill_value = None
+ if hasattr(var, 'missing_value') and (data == var.missing_value).any():
+ mask=data==var.missing_value
+ fill_value = var.missing_value
+ totalmask += mask
+ if hasattr(var, '_FillValue') and (data == var._FillValue).any():
+ mask=data==var._FillValue
+ if fill_value is None:
+ fill_value = var._FillValue
+ totalmask += mask
+ else:
+ fillval = _default_fillvals[var.typecode()]
+ if (data == fillval).any():
+ mask=data==fillval
+ if fill_value is None:
+ fill_value = fillval
+ totalmask += mask
+ # all values where data == missing_value or _FillValue are
+ # masked. fill_value set to missing_value if it exists,
+ # otherwise _FillValue.
+ if fill_value is not None:
+ data = ma.masked_array(data,mask=totalmask,fill_value=fill_value)
+ # if variable has scale_factor and add_offset attributes, rescale.
+ if hasattr(var, 'scale_factor') and hasattr(var, 'add_offset'):
+ data = var.scale_factor*data + var.add_offset
+ return data
+
+def _maskandscale(var,data):
+ # if _maskandscale mode set to True, perform
+ # automatic packing using scale_factor/add_offset
+ # and automatic filling of masked arrays using
+ # missing_value/_FillValue.
+ # use missing_value as fill value.
+ # if no missing value set, use _FillValue.
+ if hasattr(data,'mask'):
+ if hasattr(var, 'missing_value'):
+ fillval = var.missing_value
+ elif hasattr(var, '_FillValue'):
fillval = var._FillValue
- totalmask += datout==var._FillValue
- elif (datout == _default_fillvals[var.typecode()]).any():
- if fillval is None:
+ else:
fillval = _default_fillvals[var.typecode()]
- totalmask += datout==_default_fillvals[var.dtype]
- if fillval is not None:
- datout = ma.masked_array(datout,mask=totalmask,fill_value=fillval)
- try:
- datout = var.scale_factor*datout + var.add_offset
- except:
- pass
- return datout
+ data = data.filled(fill_value=fillval)
+ # pack using scale_factor and add_offset.
+ if hasattr(var, 'scale_factor') and hasattr(var, 'add_offset'):
+ data = (data - var.add_offset)/var.scale_factor
+ return data
This was sent by the SourceForge.net collaborative development platform, the
world's largest Open Source development site.
-------------------------------------------------------------------------
This SF.Net email is sponsored by the Moblin Your Move Developer's challenge
Build the coolest Linux based applications with Moblin SDK & win great prizes
Grand prize is a trip for two to an Open Source event anywhere in the world
http://moblin-contest.org/redirect.php?banner_id=100&url=/
_______________________________________________
Matplotlib-checkins mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/matplotlib-checkins