Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-asdf for openSUSE:Factory 
checked in at 2021-04-21 20:59:21
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-asdf (Old)
 and      /work/SRC/openSUSE:Factory/.python-asdf.new.12324 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-asdf"

Wed Apr 21 20:59:21 2021 rev:11 rq:886341 version:2.7.3

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-asdf/python-asdf.changes  2021-02-11 12:53:04.197974948 +0100
+++ /work/SRC/openSUSE:Factory/.python-asdf.new.12324/python-asdf.changes       2021-04-21 20:59:35.958220233 +0200
@@ -1,0 +2,9 @@
+Sat Apr 17 14:21:09 UTC 2021 - Ben Greiner <[email protected]>
+
+- Update to version 2.7.3
+  * Fix bug resulting in invalid strides values for views over FITS
+    arrays.
+  * Add pytest plugin options to skip and xfail individual tests
+    and xfail the unsupported ndarray-1.0.0 schema example.
+
+-------------------------------------------------------------------

Old:
----
  asdf-2.7.2.tar.gz

New:
----
  asdf-2.7.3.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-asdf.spec ++++++
--- /var/tmp/diff_new_pack.NhyYoN/_old  2021-04-21 20:59:36.490221071 +0200
+++ /var/tmp/diff_new_pack.NhyYoN/_new  2021-04-21 20:59:36.490221071 +0200
@@ -21,10 +21,10 @@
 # current astropy in TW requires python >= 3.7
 %define         skip_python36 1
 Name:           python-asdf
-Version:        2.7.2
+Version:        2.7.3
 Release:        0
 Summary:        Python tools to handle ASDF files
-License:        BSD-3-Clause AND BSD-2-Clause
+License:        BSD-2-Clause AND BSD-3-Clause
 URL:            https://github.com/asdf-format/asdf
Source0:        https://files.pythonhosted.org/packages/source/a/asdf/asdf-%{version}.tar.gz
 BuildRequires:  %{python_module setuptools >= 30.3.0}
@@ -39,7 +39,7 @@
 Requires:       python-semantic_version >= 2.8
 Recommends:     python-lz4 >= 0.10
 Requires(post): update-alternatives
-Requires(postun): update-alternatives
+Requires(postun):update-alternatives
 BuildArch:      noarch
 # SECTION test requirements
 BuildRequires:  %{python_module PyYAML >= 3.10}

++++++ asdf-2.7.2.tar.gz -> asdf-2.7.3.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/.github/workflows/asdf_ci.yml new/asdf-2.7.3/.github/workflows/asdf_ci.yml
--- old/asdf-2.7.2/.github/workflows/asdf_ci.yml        2021-01-19 17:59:32.000000000 +0100
+++ new/asdf-2.7.3/.github/workflows/asdf_ci.yml        2021-02-25 23:57:56.000000000 +0100
@@ -1,4 +1,4 @@
-name: ASDF CI
+name: CI
 
 on:
   push:
@@ -43,6 +43,11 @@
             python-version: 3.5
             toxenv: py35
 
+          - name: Python 3.5 with legacy packages
+            os: ubuntu-latest
+            python-version: 3.5
+            toxenv: py35-legacy
+
           - name: Documentation Build
             os: ubuntu-latest
             python-version: 3.8
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/.github/workflows/downstream.yml new/asdf-2.7.3/.github/workflows/downstream.yml
--- old/asdf-2.7.2/.github/workflows/downstream.yml     1970-01-01 01:00:00.000000000 +0100
+++ new/asdf-2.7.3/.github/workflows/downstream.yml     2021-02-25 23:57:56.000000000 +0100
@@ -0,0 +1,83 @@
+name: Downstream
+
+on:
+  workflow_dispatch:
+    inputs:
+      asdf_ref:
+        description: asdf ref
+        required: true
+        default: master
+      astropy_ref:
+        description: astropy ref
+        required: true
+        default: master
+      gwcs_ref:
+        description: gwcs ref
+        required: true
+        default: master
+      jwst_ref:
+        description: jwst ref
+        required: true
+        default: master
+      specutils_ref:
+        description: specutils ref
+        required: true
+        default: master
+  schedule:
+    # Run every Monday at 6am UTC
+    - cron: '0 6 * * 1'
+
+env:
+  CRDS_SERVER_URL: https://jwst-crds.stsci.edu
+  CRDS_PATH: ~/crds_cache
+  CRDS_CLIENT_RETRY_COUNT: 3
+  CRDS_CLIENT_RETRY_DELAY_SECONDS: 20
+
+jobs:
+  common:
+    name: ${{ matrix.package_name }}@${{ matrix.ref }} unit tests
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - package_name: gwcs
+            repository: spacetelescope/gwcs
+            ref: ${{ github.event.inputs.gwcs_ref || 'master' }}
+          - package_name: jwst
+            repository: spacetelescope/jwst
+            ref: ${{ github.event.inputs.jwst_ref || 'master' }}
+          - package_name: specutils
+            repository: astropy/specutils
+            ref: ${{ github.event.inputs.specutils_ref || 'master' }}
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+          repository: ${{ matrix.repository }}
+          ref: ${{ matrix.ref }}
+      - name: Set up Python 3.9
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.9
+      - name: Install asdf
+        run: pip install git+https://github.com/asdf-format/asdf@${{ github.event.inputs.asdf_ref || 'master' }}
+      - name: Install remaining ${{ matrix.package_name }} dependencies
+        run: pip install .[test]
+      - name: Run ${{ matrix.package_name}} tests
+        run: pytest
+
+  astropy:
+    name: astropy@${{ github.event.inputs.astropy_ref || 'master' }} unit tests
+    runs-on: ubuntu-latest
+    steps:
+      - name: Set up Python 3.9
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.9
+      - name: Install asdf
+        run: pip install git+https://github.com/asdf-format/asdf@${{ github.event.inputs.asdf_ref || 'master' }}
+      - name: Install astropy
+        run: pip install git+https://github.com/astropy/astropy@${{ github.event.inputs.astropy_ref || 'master' }}#egg=astropy[test]
+      - name: Run astropy tests
+        run: pytest --pyargs astropy
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/CHANGES.rst new/asdf-2.7.3/CHANGES.rst
--- old/asdf-2.7.2/CHANGES.rst  2021-01-19 17:59:32.000000000 +0100
+++ new/asdf-2.7.3/CHANGES.rst  2021-02-25 23:57:56.000000000 +0100
@@ -1,4 +1,13 @@
-2.7.2 (2020-01-15)
+2.7.3 (2021-02-25)
+------------------
+
+- Add pytest plugin options to skip and xfail individual tests
+  and xfail the unsupported ndarray-1.0.0 example. [#929]
+
+- Fix bug resulting in invalid strides values for views over
+  FITS arrays. [#930]
+
+2.7.2 (2021-01-15)
 ------------------
 
 - Fix bug causing test collection failures in some environments. [#889]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/PKG-INFO new/asdf-2.7.3/PKG-INFO
--- old/asdf-2.7.2/PKG-INFO     2021-01-19 18:00:11.000000000 +0100
+++ new/asdf-2.7.3/PKG-INFO     2021-02-25 23:59:51.500090600 +0100
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: asdf
-Version: 2.7.2
+Version: 2.7.3
 Summary: Python tools to handle ASDF files
 Home-page: http://github.com/asdf-format/asdf
 Author: The ASDF Developers
@@ -361,6 +361,6 @@
 Classifier: Development Status :: 5 - Production/Stable
 Requires-Python: >=3.5
 Description-Content-Type: text/x-rst
-Provides-Extra: docs
 Provides-Extra: all
+Provides-Extra: docs
 Provides-Extra: tests
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/asdf/block.py new/asdf-2.7.3/asdf/block.py
--- old/asdf-2.7.2/asdf/block.py        2021-01-19 17:59:33.000000000 +0100
+++ new/asdf-2.7.3/asdf/block.py        2021-02-25 23:57:56.000000000 +0100
@@ -854,13 +854,6 @@
         """
         return self.offset + self.header_size + self.allocated
 
-    def override_byteorder(self, byteorder):
-        """
-        Hook to permit overriding the byteorder value stored in the
-        tree.  This is used to support blocks stored in FITS files.
-        """
-        return byteorder
-
     @property
     def trust_data_dtype(self):
         """
@@ -913,11 +906,11 @@
         else:
             self._checksum = checksum
 
-    def _calculate_checksum(self, data):
+    def _calculate_checksum(self, array):
         # The following line is safe because we're only using
         # the MD5 as a checksum.
         m = hashlib.new('md5') # nosec
-        m.update(self.data.ravel('K'))
+        m.update(array)
         return m.digest()
 
     def validate_checksum(self):
@@ -932,7 +925,7 @@
             `False`.
         """
         if self._checksum:
-            checksum = self._calculate_checksum(self.data)
+            checksum = self._calculate_checksum(self._flattened_data)
             if checksum != self._checksum:
                 return False
         return True
@@ -941,7 +934,7 @@
         """
         Update the checksum based on the current data contents.
         """
-        self._checksum = self._calculate_checksum(self.data)
+        self._checksum = self._calculate_checksum(self._flattened_data)
 
     def update_size(self):
         """
@@ -950,13 +943,14 @@
         updating the file in-place, otherwise the work is redundant.
         """
         if self._data is not None:
-            self._data_size = self._data.data.nbytes
+            data = self._flattened_data
+            self._data_size = data.nbytes
 
             if not self.output_compression:
                 self._size = self._data_size
             else:
                 self._size = mcompression.get_compressed_size(
-                    self._data, self.output_compression)
+                    data, self.output_compression)
         else:
             self._data_size = self._size = 0
 
@@ -1101,22 +1095,45 @@
             self._data = self._fd.memmap_array(self.data_offset, self._size)
             self._memmapped = True
 
+    @property
+    def _flattened_data(self):
+        """
+        Retrieve flattened data suitable for writing.
+
+        Returns
+        -------
+        np.ndarray
+            1D contiguous array.
+        """
+        data = self.data
+
+        # 'K' order flattens the array in the order that elements
+        # occur in memory, except axes with negative strides which
+        # are reversed.  That is a problem for base arrays with
+        # negative strides and is an outstanding bug in this library.
+        return data.ravel(order='K')
+
     def write(self, fd):
         """
         Write an internal block to the given Python file-like object.
         """
         self._header_size = self._header.size
 
+        if self._data is not None:
+            data = self._flattened_data
+        else:
+            data = None
+
         flags = 0
         data_size = used_size = allocated_size = 0
         if self._array_storage == 'streamed':
             flags |= constants.BLOCK_FLAG_STREAMED
-        elif self._data is not None:
-            self.update_checksum()
-            data_size = self._data.nbytes
+        elif data is not None:
+            self._checksum = self._calculate_checksum(data)
+            data_size = data.nbytes
             if not fd.seekable() and self.output_compression:
                 buff = io.BytesIO()
-                mcompression.compress(buff, self._data,
+                mcompression.compress(buff, data,
                                       self.output_compression)
                 self.allocated = self._size = buff.tell()
             allocated_size = self.allocated
@@ -1141,7 +1158,7 @@
             used_size=used_size, data_size=data_size,
             checksum=checksum))
 
-        if self._data is not None:
+        if data is not None:
             if self.output_compression:
                 if not fd.seekable():
                     fd.write(buff.getvalue())
@@ -1152,7 +1169,7 @@
                     # header.
                     start = fd.tell()
                     mcompression.compress(
-                        fd, self._data, self.output_compression)
+                        fd, data, self.output_compression)
                     end = fd.tell()
                     self.allocated = self._size = end - start
                     fd.seek(self.offset + 6)
@@ -1164,7 +1181,7 @@
             else:
                 if used_size != data_size:
                     raise RuntimeError("Block used size {} is not equal to the 
data size {}".format(used_size, data_size))
-                fd.write_array(self._data)
+                fd.write_array(data)
 
     @property
     def data(self):
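
The heart of the strides fix is the new _flattened_data property above: checksum
calculation and block writing now share one ravel(order='K') result instead of
raveling independently. A minimal numpy-only sketch of what that flattening
does, including the negative-strides caveat the comment warns about:

    import numpy as np

    # A non-contiguous view over a C-contiguous base array.
    base = np.arange(12, dtype=np.int64).reshape(3, 4)
    view = base[:, ::2]

    # order='K' flattens elements in the order they occur in memory, so
    # the result is always 1D and contiguous, whatever the view's strides.
    flat = view.ravel(order='K')
    assert flat.ndim == 1 and flat.flags.contiguous

    # The caveat from the comment: axes with negative strides are
    # reversed, so this prints 0..11 (memory order), not the reversed
    # view's logical order.
    print(base[::-1].ravel(order='K'))
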
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/asdf/fits_embed.py new/asdf-2.7.3/asdf/fits_embed.py
--- old/asdf-2.7.2/asdf/fits_embed.py   2021-01-19 17:59:33.000000000 +0100
+++ new/asdf-2.7.3/asdf/fits_embed.py   2021-02-25 23:57:56.000000000 +0100
@@ -50,13 +50,6 @@
     def array_storage(self):
         return 'fits'
 
-    def override_byteorder(self, byteorder):
-        # FITS data is always stored in big-endian byte order.
-        # The data array may not report big-endian, but we want
-        # the value written to the tree to match the actual
-        # byte order on disk.
-        return 'big'
-
     @property
     def trust_data_dtype(self):
         # astropy.io.fits returns arrays in native byte order
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/asdf/generic_io.py new/asdf-2.7.3/asdf/generic_io.py
--- old/asdf-2.7.2/asdf/generic_io.py   2021-01-15 21:32:59.000000000 +0100
+++ new/asdf-2.7.3/asdf/generic_io.py   2021-02-25 23:57:56.000000000 +0100
@@ -94,7 +94,7 @@
 """
 
 
-def _array_tofile_chunked(write, array, chunksize):  # pragma: no cover
+def _array_tofile_chunked(write, array, chunksize):
     array = array.view(np.uint8)
     for i in range(0, array.nbytes, chunksize):
         write(array[i:i + chunksize].data)
@@ -103,8 +103,7 @@
 def _array_tofile_simple(fd, write, array):
     return write(array.data)
 
-
-if sys.platform == 'darwin':  # pragma: no cover
+if sys.platform == 'darwin':
     def _array_tofile(fd, write, array):
         # This value is currently set as a workaround for a known bug in Python
         # on OSX. Individual writes must be less than 2GB, which necessitates
@@ -114,7 +113,7 @@
         if fd is None or array.nbytes >= OSX_WRITE_LIMIT and array.nbytes % 4096 == 0:
             return _array_tofile_chunked(write, array, OSX_WRITE_LIMIT)
         return _array_tofile_simple(fd, write, array)
-elif sys.platform.startswith('win'):  # pragma: no cover
+elif sys.platform.startswith('win'):
     def _array_tofile(fd, write, array):
         WIN_WRITE_LIMIT = 2 ** 30
         return _array_tofile_chunked(write, array, WIN_WRITE_LIMIT)
@@ -372,7 +371,20 @@
     """
 
     def write_array(self, array):
-        _array_tofile(None, self.write, array.ravel(order='K'))
+        """
+        Write array content to the file.  Array must be 1D contiguous
+        so that this method can avoid making assumptions about the
+        intended memory layout.  Endianness is preserved.
+
+        Parameters
+        ----------
+        array : np.ndarray
+            Must be 1D contiguous.
+        """
+        if len(array.shape) != 1 or not array.flags.contiguous:
+            raise ValueError("Requires 1D contiguous array.")
+
+        _array_tofile(None, self.write, array)
 
     def seek(self, offset, whence=0):
         """
@@ -751,7 +763,10 @@
             arr.flush()
             self.fast_forward(len(arr.data))
         else:
-            _array_tofile(self._fd, self._fd.write, arr.ravel(order='K'))
+            if len(arr.shape) != 1 or not arr.flags.contiguous:
+                raise ValueError("Requires 1D contiguous array.")
+
+            _array_tofile(self._fd, self._fd.write, arr)
 
     def can_memmap(self):
         return True
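
The tightened write_array contract is easy to check from the caller's side; a
short sketch of the flags the new guard inspects (plain numpy, asdf not
required):

    import numpy as np

    arr = np.zeros((10, 10))
    view = arr[:, :5]

    # The patched write_array raises ValueError for a view like this:
    print(len(view.shape), view.flags.contiguous)   # 2 False

    # Callers such as block.py above now flatten first, producing the
    # 1D contiguous array that write_array requires.
    flat = view.ravel(order='K')
    print(len(flat.shape), flat.flags.contiguous)   # 1 True
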
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/asdf/tags/core/ndarray.py new/asdf-2.7.3/asdf/tags/core/ndarray.py
--- old/asdf-2.7.2/asdf/tags/core/ndarray.py    2021-01-19 17:59:33.000000000 +0100
+++ new/asdf-2.7.3/asdf/tags/core/ndarray.py    2021-02-25 23:57:56.000000000 +0100
@@ -85,7 +85,10 @@
     raise ValueError("Unknown datatype {0}".format(datatype))
 
 
-def numpy_byteorder_to_asdf_byteorder(byteorder):
+def numpy_byteorder_to_asdf_byteorder(byteorder, override=None):
+    if override is not None:
+        return override
+
     if byteorder == '=':
         return sys.byteorder
     elif byteorder == '<':
@@ -94,7 +97,7 @@
         return 'big'
 
 
-def numpy_dtype_to_asdf_datatype(dtype, include_byteorder=True):
+def numpy_dtype_to_asdf_datatype(dtype, include_byteorder=True, override_byteorder=None):
     dtype = np.dtype(dtype)
     if dtype.names is not None:
         fields = []
@@ -102,30 +105,30 @@
             field = dtype.fields[name][0]
             d = {}
             d['name'] = name
-            field_dtype, byteorder = numpy_dtype_to_asdf_datatype(field)
+            field_dtype, byteorder = numpy_dtype_to_asdf_datatype(field, override_byteorder=override_byteorder)
             d['datatype'] = field_dtype
             if include_byteorder:
                 d['byteorder'] = byteorder
             if field.shape:
                 d['shape'] = list(field.shape)
             fields.append(d)
-        return fields, numpy_byteorder_to_asdf_byteorder(dtype.byteorder)
+        return fields, numpy_byteorder_to_asdf_byteorder(dtype.byteorder, override=override_byteorder)
 
     elif dtype.subdtype is not None:
-        return numpy_dtype_to_asdf_datatype(dtype.subdtype[0])
+        return numpy_dtype_to_asdf_datatype(dtype.subdtype[0], override_byteorder=override_byteorder)
 
     elif dtype.name in _datatype_names:
-        return dtype.name, numpy_byteorder_to_asdf_byteorder(dtype.byteorder)
+        return dtype.name, numpy_byteorder_to_asdf_byteorder(dtype.byteorder, override=override_byteorder)
 
     elif dtype.name == 'bool':
-        return 'bool8', numpy_byteorder_to_asdf_byteorder(dtype.byteorder)
+        return 'bool8', numpy_byteorder_to_asdf_byteorder(dtype.byteorder, override=override_byteorder)
 
     elif dtype.name.startswith('string') or dtype.name.startswith('bytes'):
         return ['ascii', dtype.itemsize], 'big'
 
     elif dtype.name.startswith('unicode') or dtype.name.startswith('str'):
         return (['ucs4', int(dtype.itemsize / 4)],
-                numpy_byteorder_to_asdf_byteorder(dtype.byteorder))
+                numpy_byteorder_to_asdf_byteorder(dtype.byteorder, override=override_byteorder))
 
     raise ValueError("Unknown dtype {0}".format(dtype))
 
@@ -414,20 +417,55 @@
 
     @classmethod
     def to_tree(cls, data, ctx):
+        # The ndarray-1.0.0 schema does not permit 0 valued strides.
+        # Perhaps we'll want to allow this someday, to efficiently
+        # represent an array of all the same value.
         if any(stride == 0 for stride in data.strides):
             data = np.ascontiguousarray(data)
 
-        base = util.get_array_base(data)
         shape = data.shape
-        dtype = data.dtype
-        offset = data.ctypes.data - base.ctypes.data
 
-        if data.flags.c_contiguous:
+        block = ctx.blocks.find_or_create_block_for_array(data, ctx)
+
+        if block.array_storage == "fits":
+            # Views over arrays stored in FITS files have some idiosyncrasies.
+            # astropy.io.fits always writes arrays C-contiguous with big-endian
+            # byte order, whereas asdf preserves the "contiguousity" and byte order
+            # of the base array.
+            if (block.data.shape != data.shape or
+                block.data.dtype.itemsize != data.dtype.itemsize or
+                block.data.ctypes.data != data.ctypes.data or
+                block.data.strides != data.strides):
+                raise ValueError(
+                    "ASDF has only limited support for serializing views over 
arrays stored "
+                    "in FITS HDUs.  This error likely means that a slice of 
such an array "
+                    "was found in the ASDF tree.  The slice can be decoupled 
from the FITS "
+                    "array by calling copy() before assigning it to the tree."
+                )
+
+            offset = 0
             strides = None
+            dtype, byteorder = numpy_dtype_to_asdf_datatype(
+                data.dtype,
+                include_byteorder=(block.array_storage != "inline"),
+                override_byteorder="big",
+            )
         else:
-            strides = data.strides
+            # Compute the offset relative to the base array and not the
+            # block data, in case the block is compressed.
+            base = util.get_array_base(data)
 
-        block = ctx.blocks.find_or_create_block_for_array(data, ctx)
+            offset = data.ctypes.data - base.ctypes.data
+
+            if data.flags.c_contiguous:
+                strides = None
+            else:
+                strides = data.strides
+
+            dtype, byteorder = numpy_dtype_to_asdf_datatype(
+                data.dtype,
+                include_byteorder=(block.array_storage != "inline"),
+            )
 
         result = {}
 
@@ -435,11 +473,6 @@
         if block.array_storage == 'streamed':
             result['shape'][0] = '*'
 
-        dtype, byteorder = numpy_dtype_to_asdf_datatype(
-            dtype, include_byteorder=(block.array_storage != 'inline'))
-
-        byteorder = block.override_byteorder(byteorder)
-
         if block.array_storage == 'inline':
             listdata = numpy_array_to_list(data)
             result['data'] = listdata
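
For user code that hits the new ValueError, the fix the message suggests is a
copy() before assigning to the tree. A sketch of the workaround (assumes numpy
and astropy are installed; the file name is illustrative, and the pattern
mirrors the test_fits_embed.py tests further below):

    import numpy as np
    from astropy.io import fits
    import asdf.fits_embed

    data = np.zeros((10, 10))
    data_view = data[:, :5]

    hdul = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(data_view)])
    with asdf.fits_embed.AsdfInFits(hdulist=hdul) as af:
        af["fits"] = hdul[-1].data
        # A view with a different memory layout would raise ValueError
        # on write; copy() decouples it from the FITS array.
        af["other"] = data_view[:, ::-1].copy()
        af.write_to("example.fits")
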
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/asdf/tags/core/tests/test_ndarray.py new/asdf-2.7.3/asdf/tags/core/tests/test_ndarray.py
--- old/asdf-2.7.2/asdf/tags/core/tests/test_ndarray.py 2021-01-19 17:59:33.000000000 +0100
+++ new/asdf-2.7.3/asdf/tags/core/tests/test_ndarray.py 2021-02-25 23:57:56.000000000 +0100
@@ -371,7 +371,7 @@
 
 def test_string_table(tmpdir):
     tree = {
-        'table': np.array([(b'foo', '??????????????????????????????', 42, 53.0)])
+        'table': np.array([(b'foo', '??????????????????????????????', '42', '53.0')])
         }
 
     helpers.assert_roundtrip_tree(tree, tmpdir)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/asdf/tests/test_fits_embed.py new/asdf-2.7.3/asdf/tests/test_fits_embed.py
--- old/asdf-2.7.2/asdf/tests/test_fits_embed.py        2021-01-19 17:59:33.000000000 +0100
+++ new/asdf-2.7.3/asdf/tests/test_fits_embed.py        2021-02-25 23:57:56.000000000 +0100
@@ -397,6 +397,7 @@
         data = ff.tree['my_table']
         assert data._source.startswith('fits:')
 
+
 def test_extension_check():
     testfile = get_test_data_path('extension_check.fits')
 
@@ -424,6 +425,7 @@
     with fits.open(tmpfile) as hdu:
         hdu.verify('exception')
 
+
 def test_dangling_file_handle(tmpdir):
     """
     This tests the bug fix introduced in #533. Without the bug fix, this test
@@ -457,3 +459,67 @@
     gc.collect()
 
     del ctx
+
+
+def test_array_view(tmp_path):
+    """
+    Special handling is required when a view over a larger array
+    is assigned to an HDU and referenced from the ASDF tree.
+    """
+    file_path = str(tmp_path / "test.fits")
+
+    data = np.zeros((10, 10))
+    data_view = data[:, :5]
+
+    hdul = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(data_view)])
+    with asdf.fits_embed.AsdfInFits(hdulist=hdul) as af:
+        af["data"] = hdul[-1].data
+        af.write_to(file_path)
+
+    with asdf.open(file_path) as af:
+        assert_array_equal(af["data"], data_view)
+
+
+def test_array_view_compatible_layout(tmp_path):
+    """
+    We should be able to serialize additional views that have
+    the same memory layout.
+    """
+    file_path = str(tmp_path / "test.fits")
+
+    data = np.zeros((10, 10), dtype=np.float64)
+    data_view = data[:, :5]
+    other_view = data_view[:, :]
+    different_dtype_view = data_view.view(np.int64)
+
+    hdul = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(data_view)])
+    with asdf.fits_embed.AsdfInFits(hdulist=hdul) as af:
+        af["data"] = hdul[-1].data
+        af["other"] = other_view
+        af["different_dtype"] = different_dtype_view
+        af.write_to(file_path)
+
+    with asdf.open(file_path) as af:
+        assert_array_equal(af["data"], data_view)
+        assert_array_equal(af["other"], other_view)
+        assert_array_equal(af["other"], different_dtype_view)
+
+
+def test_array_view_different_layout(tmp_path):
+    """
+    A view over the FITS array with a different memory layout
+    might end up corrupted when astropy.io.fits changes the
+    array to C-contiguous and big-endian on write.
+    """
+    file_path = str(tmp_path / "test.fits")
+
+    data = np.zeros((10, 10))
+    data_view = data[:, :5]
+    other_view = data_view[:, ::-1]
+
+    hdul = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(data_view)])
+    with asdf.fits_embed.AsdfInFits(hdulist=hdul) as af:
+        af["fits"] = hdul[-1].data
+        af["other"] = other_view
+        with pytest.raises(ValueError, match="ASDF has only limited support for serializing views over arrays stored in FITS HDUs"):
+            af.write_to(file_path)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/asdf.egg-info/PKG-INFO new/asdf-2.7.3/asdf.egg-info/PKG-INFO
--- old/asdf-2.7.2/asdf.egg-info/PKG-INFO       2021-01-19 18:00:10.000000000 +0100
+++ new/asdf-2.7.3/asdf.egg-info/PKG-INFO       2021-02-25 23:59:50.000000000 +0100
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: asdf
-Version: 2.7.2
+Version: 2.7.3
 Summary: Python tools to handle ASDF files
 Home-page: http://github.com/asdf-format/asdf
 Author: The ASDF Developers
@@ -361,6 +361,6 @@
 Classifier: Development Status :: 5 - Production/Stable
 Requires-Python: >=3.5
 Description-Content-Type: text/x-rst
-Provides-Extra: docs
 Provides-Extra: all
+Provides-Extra: docs
 Provides-Extra: tests
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/asdf.egg-info/SOURCES.txt new/asdf-2.7.3/asdf.egg-info/SOURCES.txt
--- old/asdf-2.7.2/asdf.egg-info/SOURCES.txt    2021-01-19 18:00:10.000000000 +0100
+++ new/asdf-2.7.3/asdf.egg-info/SOURCES.txt    2021-02-25 23:59:50.000000000 +0100
@@ -13,6 +13,7 @@
 setup.py
 tox.ini
 .github/workflows/asdf_ci.yml
+.github/workflows/downstream.yml
 .github/workflows/s390x.yml
 asdf/__init__.py
 asdf/_convenience.py
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/docs/asdf/changes.rst new/asdf-2.7.3/docs/asdf/changes.rst
--- old/asdf-2.7.2/docs/asdf/changes.rst        2021-01-19 17:59:33.000000000 +0100
+++ new/asdf-2.7.3/docs/asdf/changes.rst        2021-02-25 23:57:56.000000000 +0100
@@ -4,6 +4,19 @@
 Changes
 *******
 
+What's New in ASDF 2.7.3?
+=========================
+
+The ASDF Standard is at v1.5.0.
+
+Changes include:
+
+- Fix bug resulting in invalid strides values for views over
+  FITS arrays.
+
+- Add pytest plugin options to skip and xfail individual tests
+  and xfail the unsupported ndarray-1.0.0 schema example.
+
 What's New in ASDF 2.7.2?
 =========================
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/pytest_asdf/plugin.py new/asdf-2.7.3/pytest_asdf/plugin.py
--- old/asdf-2.7.2/pytest_asdf/plugin.py        2021-01-15 21:32:59.000000000 +0100
+++ new/asdf-2.7.3/pytest_asdf/plugin.py        2021-02-25 23:57:56.000000000 +0100
@@ -4,6 +4,7 @@
 import os
 from importlib.util import find_spec
 from pkg_resources import parse_version
+import pathlib
 
 import yaml
 import pytest
@@ -19,6 +20,12 @@
     parser.addini(
         "asdf_schema_skip_names", "Base names of files to skip in schema 
tests")
     parser.addini(
+        "asdf_schema_skip_tests",
+        "List of tests to skip, one per line, in format <schema path 
suffix>::<test name>")
+    parser.addini(
+        "asdf_schema_xfail_tests",
+        "List of tests to xfail, one per line, in format <schema path 
suffix>::<test name>")
+    parser.addini(
         "asdf_schema_skip_examples",
         "Base names of schemas whose examples should not be tested")
     parser.addini(
@@ -52,7 +59,7 @@
 class AsdfSchemaFile(pytest.File):
     @classmethod
     def from_parent(cls, parent, *, fspath, skip_examples=False, validate_default=True,
-        ignore_unrecognized_tag=False, ignore_version_mismatch=False, **kwargs):
+        ignore_unrecognized_tag=False, ignore_version_mismatch=False, skip_tests=[], xfail_tests=[], **kwargs):
         if hasattr(super(), "from_parent"):
             result = super().from_parent(parent, fspath=fspath, **kwargs)
         else:
@@ -62,19 +69,36 @@
         result.validate_default = validate_default
         result.ignore_unrecognized_tag = ignore_unrecognized_tag
         result.ignore_version_mismatch = ignore_version_mismatch
+        result.skip_tests = skip_tests
+        result.xfail_tests = xfail_tests
+
         return result
 
+    def _set_markers(self, item):
+        if item.name in self.skip_tests or "*" in self.skip_tests:
+            item.add_marker(pytest.mark.skip)
+        if item.name in self.xfail_tests or "*" in self.xfail_tests:
+            item.add_marker(pytest.mark.xfail)
+
     def collect(self):
-        yield AsdfSchemaItem.from_parent(self, self.fspath, validate_default=self.validate_default)
+        item = AsdfSchemaItem.from_parent(self, self.fspath, validate_default=self.validate_default, name="test_schema")
+        self._set_markers(item)
+        yield item
+
         if not self.skip_examples:
-            for example in self.find_examples_in_schema():
-                yield AsdfSchemaExampleItem.from_parent(
+            for index, example in enumerate(self.find_examples_in_schema()):
+                name = "test_example_{}".format(example)
+                item = AsdfSchemaExampleItem.from_parent(
                     self,
                     self.fspath,
                     example,
+                    index,
                     ignore_unrecognized_tag=self.ignore_unrecognized_tag,
                     ignore_version_mismatch=self.ignore_version_mismatch,
+                    name=name,
                 )
+                self._set_markers(item)
+                yield item
 
     def find_examples_in_schema(self):
         """Returns generator for all examples in schema at given path"""
@@ -95,9 +119,10 @@
     @classmethod
     def from_parent(cls, parent, schema_path, validate_default=True, **kwargs):
         if hasattr(super(), "from_parent"):
-            result = super().from_parent(parent, name=str(schema_path), **kwargs)
+            result = super().from_parent(parent, **kwargs)
         else:
-            result = AsdfSchemaItem(str(schema_path), parent, **kwargs)
+            name = kwargs.pop("name")
+            result = AsdfSchemaItem(name, parent, **kwargs)
 
         result.schema_path = schema_path
         result.validate_default = validate_default
@@ -113,6 +138,9 @@
             resolve_references=True)
         schema.check_schema(schema_tree, validate_default=self.validate_default)
 
+    def reportinfo(self):
+        return self.fspath, 0, ""
+
 
 ASTROPY_4_0_TAGS = {
     'tag:stsci.edu:asdf/transform/rotate_sequence_3d',
@@ -151,13 +179,14 @@
 
 class AsdfSchemaExampleItem(pytest.Item):
     @classmethod
-    def from_parent(cls, parent, schema_path, example,
+    def from_parent(cls, parent, schema_path, example, example_index,
         ignore_unrecognized_tag=False, ignore_version_mismatch=False, **kwargs):
-        test_name = "{}-example".format(schema_path)
         if hasattr(super(), "from_parent"):
-            result = super().from_parent(parent, name=test_name, **kwargs)
+            result = super().from_parent(parent, **kwargs)
         else:
-            result = AsdfSchemaExampleItem(test_name, parent, **kwargs)
+            name = kwargs.pop("name")
+            result = AsdfSchemaExampleItem(name, parent, **kwargs)
+
         result.filename = str(schema_path)
         result.example = example
         result.ignore_unrecognized_tag = ignore_unrecognized_tag
@@ -226,6 +255,31 @@
             buff = io.BytesIO()
             ff.write_to(buff)
 
+    def reportinfo(self):
+        return self.fspath, 0, ""
+
+
+def _parse_test_list(content):
+    result = {}
+
+    for line in content.split("\n"):
+        line = line.strip()
+        if len(line) > 0:
+            parts = line.split("::", 1)
+            path_suffix = pathlib.Path(parts[0]).as_posix()
+
+            if len(parts) == 1:
+                name = "*"
+            else:
+                name = parts[-1]
+
+            if path_suffix not in result:
+                result[path_suffix] = []
+
+            result[path_suffix].append(name)
+
+    return result
+
 
 def pytest_collect_file(path, parent):
     if not (parent.config.getini('asdf_schema_tests_enabled') or
@@ -242,6 +296,9 @@
     ignore_unrecognized_tag = parent.config.getini('asdf_schema_ignore_unrecognized_tag')
     ignore_version_mismatch = parent.config.getini('asdf_schema_ignore_version_mismatch')
 
+    skip_tests = _parse_test_list(parent.config.getini('asdf_schema_skip_tests'))
+    xfail_tests = _parse_test_list(parent.config.getini('asdf_schema_xfail_tests'))
+
     schema_roots = [os.path.join(str(parent.config.rootdir), os.path.normpath(root))
                         for root in schema_roots]
 
@@ -250,6 +307,16 @@
 
     for root in schema_roots:
         if str(path).startswith(root) and path.purebasename not in skip_names:
+            posix_path = pathlib.Path(str(path)).as_posix()
+            schema_skip_tests = []
+            for suffix, names in skip_tests.items():
+                if posix_path.endswith(suffix):
+                    schema_skip_tests.extend(names)
+            schema_xfail_tests = []
+            for suffix, names in xfail_tests.items():
+                if posix_path.endswith(suffix):
+                    schema_xfail_tests.extend(names)
+
             return AsdfSchemaFile.from_parent(
                 parent,
                 fspath=path,
@@ -257,6 +324,8 @@
                 validate_default=validate_default,
                 ignore_unrecognized_tag=ignore_unrecognized_tag,
                 ignore_version_mismatch=ignore_version_mismatch,
+                skip_tests=schema_skip_tests,
+                xfail_tests=schema_xfail_tests,
             )
 
     return None
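
The ini format accepted by the two new options is one entry per line: either a
bare schema path suffix, which skips or xfails every test collected from that
schema, or <schema path suffix>::<test name> for a single test. A sketch of
what the _parse_test_list helper above returns for such input (importing the
private helper purely for illustration):

    from pytest_asdf.plugin import _parse_test_list

    content = """
    stsci.edu/asdf/core/ndarray-1.0.0.yaml::test_example_2
    stsci.edu/yaml-schema/draft-01.yaml
    """

    result = _parse_test_list(content)
    # A <suffix>::<name> entry maps to the named test, a bare suffix to '*':
    # {'stsci.edu/asdf/core/ndarray-1.0.0.yaml': ['test_example_2'],
    #  'stsci.edu/yaml-schema/draft-01.yaml': ['*']}
    print(result)
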
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/setup.cfg new/asdf-2.7.3/setup.cfg
--- old/asdf-2.7.2/setup.cfg    2021-01-19 18:00:11.000000000 +0100
+++ new/asdf-2.7.3/setup.cfg    2021-02-25 23:59:51.501266000 +0100
@@ -50,8 +50,6 @@
        matplotlib
        docutils
 tests = 
-       # As of 2020-07-28, pytest-doctestplus is not compatible
-       # with pytest 6.0.0.
        pytest<6
        astropy
        gwcs
@@ -89,20 +87,25 @@
 open_files_ignore = test.fits asdf.fits
 filterwarnings = 
        ignore::asdf.exceptions.AsdfDeprecationWarning:asdf.asdftypes
+       ignore:numpy.ndarray size changed:astropy.utils.exceptions.AstropyWarning
+       ignore:numpy.ndarray size changed:RuntimeWarning
 text_file_format = rst
 asdf_schema_root = asdf-standard/schemas asdf/schemas
-asdf_schema_skip_names = 
-       asdf-schema-1.0.0
-       draft-01
-       celestial_frame-1.0.0
-       celestial_frame-1.1.0
-       frame-1.1.0
-       spectral_frame-1.1.0
-       step-1.1.0
-       step-1.2.0
-       wcs-1.1.0
-       wcs-1.2.0
-asdf_schema_skip_examples = domain-1.0.0 frame-1.0.0
+asdf_schema_skip_tests = 
+       stsci.edu/asdf/asdf-schema-1.0.0.yaml
+       stsci.edu/asdf/transform/domain-1.0.0.yaml
+       stsci.edu/asdf/wcs/celestial_frame-1.0.0.yaml
+       stsci.edu/asdf/wcs/celestial_frame-1.1.0.yaml
+       stsci.edu/asdf/wcs/frame-1.0.0.yaml
+       stsci.edu/asdf/wcs/frame-1.1.0.yaml
+       stsci.edu/asdf/wcs/spectral_frame-1.1.0.yaml
+       stsci.edu/asdf/wcs/step-1.1.0.yaml
+       stsci.edu/asdf/wcs/step-1.2.0.yaml
+       stsci.edu/asdf/wcs/wcs-1.1.0.yaml
+       stsci.edu/asdf/wcs/wcs-1.2.0.yaml
+       stsci.edu/yaml-schema/draft-01.yaml
+asdf_schema_xfail_tests = 
+       stsci.edu/asdf/core/ndarray-1.0.0.yaml::test_example_2
 asdf_schema_tests_enabled = true
 asdf_schema_ignore_unrecognized_tag = true
 addopts = --doctest-rst
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asdf-2.7.2/tox.ini new/asdf-2.7.3/tox.ini
--- old/asdf-2.7.2/tox.ini      2021-01-19 17:59:33.000000000 +0100
+++ new/asdf-2.7.3/tox.ini      2021-02-25 23:57:56.000000000 +0100
@@ -19,6 +19,7 @@
     legacy: pyyaml==3.10
     legacy: jsonschema==3.0.2
     legacy: numpy~=1.10.0
+    legacy: pytest~=4.6.11
     numpy11,numpy12,legacy: astropy~=3.0.0
     numpy11: numpy==1.11
     numpy12: numpy==1.12
@@ -40,7 +41,10 @@
 
 [testenv:warnings]
 commands=
-    pytest --remote-data -W error -W ignore::asdf.exceptions.AsdfDeprecationWarning:asdf.asdftypes
+    pytest --remote-data -W error \
+      -W ignore::asdf.exceptions.AsdfDeprecationWarning:asdf.asdftypes \
+      -W 'ignore:numpy.ndarray size changed:astropy.utils.exceptions.AstropyWarning' \
+      -W 'ignore:numpy.ndarray size changed:RuntimeWarning'
 
 [testenv:packaged]
 # The default tox working directory is in .tox in the source directory.  If we

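The -W filters added above use Python's standard warning-filter syntax
(action:message:category, with the message matched as a prefix). A rough
in-code equivalent of the new RuntimeWarning ignore, for reference:

    import warnings

    # Prefix-match "numpy.ndarray size changed" messages, as the
    # 'ignore:numpy.ndarray size changed:RuntimeWarning' filter does.
    warnings.filterwarnings(
        "ignore",
        message="numpy.ndarray size changed",
        category=RuntimeWarning,
    )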