Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package dlpack for openSUSE:Factory checked 
in at 2026-02-06 19:09:03
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/dlpack (Old)
 and      /work/SRC/openSUSE:Factory/.dlpack.new.1670 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "dlpack"

Fri Feb  6 19:09:03 2026 rev:5 rq:1331533 version:1.3

Changes:
--------
--- /work/SRC/openSUSE:Factory/dlpack/dlpack.changes    2025-09-29 
16:36:20.656790491 +0200
+++ /work/SRC/openSUSE:Factory/.dlpack.new.1670/dlpack.changes  2026-02-06 
19:14:35.810822376 +0100
@@ -1,0 +2,13 @@
+Fri Feb  6 08:10:55 UTC 2026 - Guillaume GARDET <[email protected]>
+
+- Update to 1.3:
+  * [DOCS] Include exchange api in the docs by @tqchen in #181
+  * Update exchange API to be capsule following new convention
+    by @tqchen in #180
+- Skipped to 1.2:
+  * Add support for Trainium device in #168
+  * Enforce strides to be not null when ndim is nonzero in #178
+  * Support DLPACK C Functions for Speed Exchange and Stream
+    Handling in #174
+
+-------------------------------------------------------------------

Old:
----
  dlpack-1.1.tar.gz

New:
----
  dlpack-1.3.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ dlpack.spec ++++++
--- /var/tmp/diff_new_pack.b0QSGb/_old  2026-02-06 19:14:37.550895681 +0100
+++ /var/tmp/diff_new_pack.b0QSGb/_new  2026-02-06 19:14:37.566896355 +0100
@@ -1,7 +1,7 @@
 #
 # spec file for package dlpack
 #
-# Copyright (c) 2025 SUSE LLC and contributors
+# Copyright (c) 2026 SUSE LLC and contributors
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -17,13 +17,13 @@
 
 
 Name:           dlpack
-Version:        1.1
+Version:        1.3
 Release:        0
 Summary:        DLPack: Open In Memory Tensor Structure
 License:        Apache-2.0
 URL:            https://github.com/dmlc/dlpack
 Source0:        
https://github.com/dmlc/dlpack/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz
-BuildRequires:  cmake
+BuildRequires:  cmake >= 3.16
 BuildRequires:  gcc-c++
 
 %description

++++++ dlpack-1.1.tar.gz -> dlpack-1.3.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-1.1/CMakeLists.txt 
new/dlpack-1.3/CMakeLists.txt
--- old/dlpack-1.1/CMakeLists.txt       2025-03-10 20:12:16.000000000 +0100
+++ new/dlpack-1.3/CMakeLists.txt       2026-01-24 07:37:33.000000000 +0100
@@ -1,8 +1,10 @@
 ###
-# Set minimum version of CMake. Since command 'project' use
-# VERSION sub-option we need at least 3.0.
-# Note: If you use 2.6 or 2.4, God kills a kitten. Seriously.
-cmake_minimum_required(VERSION 3.2 FATAL_ERROR)
+# Set minimum version of CMake. CMake 4.0 drops support for CMake<3.5 so that
+# is the absolute lowest that we could allow without blocking the usage of new
+# versions of CMake. Using 3.16 since it is the supported version on Ubuntu
+# 20.04 LTS (using that as a representative distro for our desired level of
+# backwards compatibility).
+cmake_minimum_required(VERSION 3.16 FATAL_ERROR)
 
 ####
 # Set variables:
Binary files old/dlpack-1.1/docs/source/_static/images/DLPack_diagram.png and 
new/dlpack-1.3/docs/source/_static/images/DLPack_diagram.png differ
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-1.1/docs/source/c_api.rst 
new/dlpack-1.3/docs/source/c_api.rst
--- old/dlpack-1.1/docs/source/c_api.rst        2025-03-10 20:12:16.000000000 
+0100
+++ new/dlpack-1.3/docs/source/c_api.rst        2026-01-24 07:37:33.000000000 
+0100
@@ -18,6 +18,8 @@
 
 .. doxygendefine:: DLPACK_FLAG_BITMASK_IS_COPIED
 
+.. doxygendefine:: DLPACK_FLAG_BITMASK_IS_SUBBYTE_TYPE_PADDED
+
 Enumerations
 ~~~~~~~~~~~~
 
@@ -25,6 +27,17 @@
 
 .. doxygenenum:: DLDataTypeCode
 
+
+Typedefs
+~~~~~~~~
+
+.. doxygentypedef:: DLPackManagedTensorAllocator
+.. doxygentypedef:: DLPackManagedTensorFromPyObjectNoSync
+.. doxygentypedef:: DLPackManagedTensorToPyObjectNoSync
+.. doxygentypedef:: DLPackDLTensorFromPyObjectNoSync
+.. doxygentypedef:: DLPackCurrentWorkStream
+
+
 Structs
 ~~~~~~~
 
@@ -45,3 +58,9 @@
 
 .. doxygenstruct:: DLManagedTensorVersioned
    :members:
+
+.. doxygenstruct:: DLPackExchangeAPIHeader
+   :members:
+
+.. doxygenstruct:: DLPackExchangeAPI
+   :members:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-1.1/docs/source/python_spec.rst 
new/dlpack-1.3/docs/source/python_spec.rst
--- old/dlpack-1.1/docs/source/python_spec.rst  2025-03-10 20:12:16.000000000 
+0100
+++ new/dlpack-1.3/docs/source/python_spec.rst  2026-01-24 07:37:33.000000000 
+0100
@@ -179,6 +179,44 @@
    guaranteed to be in a certain order or not.
 
 
+DLPack C Exchange API
+~~~~~~~~~~~~~~~~~~~~~
+
+Starting with DLPack 1.3, a new C Exchange API is introduced to enable faster
+data exchange than the Python ``__dlpack__`` API at the C extension level.
+Producer array frameworks may provide a ``__dlpack_c_exchange_api__``
+attribute on the array type.
+The attribute should be a ``PyCapsule`` with name ``"dlpack_exchange_api"``.
+The consumer can query whether this attribute exists and use it at the C 
extension level.
+Notably, consumer frameworks can always start implementing by only using the 
Python ``__dlpack__`` API,
+and then upgrade to the C Exchange API later when faster data exchange is 
needed.
+
+.. code-block:: C
+
+   // Get type, fetch capsule attribute, and extract the C struct pointer
+   PyObject *api_capsule = PyObject_GetAttrString((PyObject 
*)Py_TYPE(tensor_obj), "__dlpack_c_exchange_api__");
+   if (api_capsule == NULL) { goto handle_error; }
+   MyDLPackExchangeAPI *api = (MyDLPackExchangeAPI 
*)PyCapsule_GetPointer(api_capsule, "dlpack_exchange_api");
+   Py_DECREF(api_capsule);
+   if (api == NULL) { goto handle_error; }
+
+
+.. note:: Implementation of the C Exchange API
+
+   Producer framework should implement the C Exchange API in a static way 
either
+   through Cython, Python C extensions, or Python binding mechanism. 
Importantly,
+   because the DLPack C exchange API operates at the C extension level, we need
+   direct interaction between the array framework ``PyObject*`` and DLPack,
+   as a result it is harder to implement the C Exchange API through ctypes 
(because
+   ctypes release thread state by default which is needed to interact with the 
Python C API).
+
+Reference implementations of the C Exchange API in frameworks:
+
+
+* PyTorch: `C++ 
<https://github.com/pytorch/pytorch/tree/8da5d29de7feb165047246464d09c4c2b2318987/torch/csrc/Module.cpp#L692>`__
+* Paddle: `C++ 
<https://github.com/PaddlePaddle/Paddle/tree/6f808ba7d4742305a3a84de5cd299b8b76adfe5c/paddle/fluid/pybind/pybind.cc#L856>`__
+
+
 Reference Implementations
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -198,3 +236,4 @@
 * mpi4py: `Cython 
<https://github.com/mpi4py/mpi4py/blob/master/src/mpi4py/MPI.src/asdlpack.pxi>`_
 * Paddle: `C++ 
<https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/tensor_util.cc#L901-L951>`__,
 `Python wrapper using Python C API 
<https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/pybind/pybind.cc#L1263-L1280>`__
 * Hidet: `ctypes 
<https://github.com/hidet-org/hidet/blob/main/python/hidet/graph/impl/dlpack.py>`__
+
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-1.1/include/dlpack/dlpack.h 
new/dlpack-1.3/include/dlpack/dlpack.h
--- old/dlpack-1.1/include/dlpack/dlpack.h      2025-03-10 20:12:16.000000000 
+0100
+++ new/dlpack-1.3/include/dlpack/dlpack.h      2026-01-24 07:37:33.000000000 
+0100
@@ -1,5 +1,5 @@
 /*!
- *  Copyright (c) 2017 by Contributors
+ *  Copyright (c) 2017 -  by Contributors
  * \file dlpack.h
  * \brief The common header of DLPack.
  */
@@ -19,7 +19,7 @@
 #define DLPACK_MAJOR_VERSION 1
 
 /*! \brief The current minor version of dlpack */
-#define DLPACK_MINOR_VERSION 1
+#define DLPACK_MINOR_VERSION 3
 
 /*! \brief DLPACK_DLL prefix for windows */
 #ifdef _WIN32
@@ -118,6 +118,8 @@
   kDLHexagon = 16,
   /*! \brief Microsoft MAIA devices */
   kDLMAIA = 17,
+  /*! \brief AWS Trainium */
+  kDLTrn = 18,
 } DLDeviceType;
 
 /*!
@@ -222,8 +224,8 @@
    * types. This pointer is always aligned to 256 bytes as in CUDA. The
    * `byte_offset` field should be used to point to the beginning of the data.
    *
-   * Note that as of Nov 2021, multiply libraries (CuPy, PyTorch, TensorFlow,
-   * TVM, perhaps others) do not adhere to this 256 byte aligment requirement
+   * Note that as of Nov 2021, multiple libraries (CuPy, PyTorch, TensorFlow,
+   * TVM, perhaps others) do not adhere to this 256 byte alignment requirement
    * on CPU/CUDA/ROCm, and always use `byte_offset=0`.  This must be fixed
    * (after which this note will be updated); at the moment it is recommended
    * to not rely on the data pointer being correctly aligned.
@@ -252,11 +254,23 @@
   int32_t ndim;
   /*! \brief The data type of the pointer*/
   DLDataType dtype;
-  /*! \brief The shape of the tensor */
+  /*!
+   * \brief The shape of the tensor
+   *
+   *  When ndim == 0, shape can be set to NULL.
+   */
   int64_t* shape;
   /*!
-   * \brief strides of the tensor (in number of elements, not bytes)
-   *  can be NULL, indicating tensor is compact and row-majored.
+   * \brief strides of the tensor (in number of elements, not bytes),
+   *  cannot be NULL if ndim != 0; must point to
+   *  an array of ndim elements that specifies the strides,
+   *  so consumer can always rely on strides[dim] being valid for 0 <= dim < 
ndim.
+   *
+   *  When ndim == 0, strides can be set to NULL.
+   *
+   *  \note Before DLPack v1.2, strides can be NULL to indicate contiguous 
data.
+   *        This is not allowed in DLPack v1.2 and later. The rationale
+   *        is to simplify the consumer handling.
    */
   int64_t* strides;
   /*! \brief The offset in bytes to the beginning pointer to data */
@@ -293,7 +307,7 @@
   void (*deleter)(struct DLManagedTensor * self);
 } DLManagedTensor;
 
-// bit masks used in in the DLManagedTensorVersioned
+// bit masks used in the DLManagedTensorVersioned
 
 /*! \brief bit mask to indicate that the tensor is read only. */
 #define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL)
@@ -306,7 +320,7 @@
  */
 #define DLPACK_FLAG_BITMASK_IS_COPIED (1UL << 1UL)
 
-/*
+/*!
  * \brief bit mask to indicate that whether a sub-byte type is packed or 
padded.
  *
  * The default for sub-byte types (ex: fp4/fp6) is assumed packed. This flag 
can
@@ -324,7 +338,7 @@
  *
  * \note This is the current standard DLPack exchange data structure.
  */
-struct DLManagedTensorVersioned {
+typedef struct DLManagedTensorVersioned {
   /*!
    * \brief The API and ABI version of the current managed Tensor
    */
@@ -358,7 +372,274 @@
   uint64_t flags;
   /*! \brief DLTensor which is being memory managed */
   DLTensor dl_tensor;
-};
+} DLManagedTensorVersioned;
+
+//----------------------------------------------------------------------
+// DLPack `__dlpack_c_exchange_api__` fast exchange protocol definitions
+//----------------------------------------------------------------------
+/*!
+ * \brief Request a producer library to create a new tensor.
+ *
+ * Create a new `DLManagedTensorVersioned` within the context of the producer
+ * library. The allocation is defined via the prototype DLTensor.
+ *
+ * This function is exposed by the framework through the DLPackExchangeAPI.
+ *
+ * \param prototype The prototype DLTensor. Only the dtype, ndim, shape,
+ *        and device fields are used.
+ * \param out The output DLManagedTensorVersioned.
+ * \param error_ctx Context for `SetError`.
+ * \param SetError The function to set the error.
+ * \return 0 on success, -1 on failure (the result is returned via `out`).
+ *         SetError is called exactly when failure is returned (the implementer
+ *         must ensure this).
+ * \note - As a C function, must not throw C++ exceptions.
+ *       - Error propagation via SetError to avoid any direct need
+ *         of Python API. Due to this `SetError` may have to ensure the GIL is
+ *         held since it will presumably set a Python error.
+ *
+ * \sa DLPackExchangeAPI
+ */
+typedef int (*DLPackManagedTensorAllocator)(                                   
      //
+  DLTensor* prototype, DLManagedTensorVersioned** out, void* error_ctx,        
      //
+  void (*SetError)(void* error_ctx, const char* kind, const char* message)     
      //
+);
+
+/*!
+ * \brief Exports a PyObject* Tensor/NDArray to a DLManagedTensorVersioned.
+ *
+ * This function does not perform any stream synchronization. The consumer 
should query
+ * DLPackCurrentWorkStream to get the current work stream and launch kernels 
on it.
+ *
+ * This function is exposed by the framework through the DLPackExchangeAPI.
+ *
+ * \param py_object The Python object to convert. Must have the same type
+ *        as the one the `DLPackExchangeAPI` was discovered from.
+ * \param out The output DLManagedTensorVersioned.
+ * \return 0 on success, -1 on failure with a Python exception set (the result
+ *         is returned via `out`). If the data cannot be described using DLPack
+ *         the exception should be a BufferError if possible.
+ * \note - As a C function, must not throw C++ exceptions.
+ *
+ * \sa DLPackExchangeAPI, DLPackCurrentWorkStream
+ */
+typedef int (*DLPackManagedTensorFromPyObjectNoSync)(                 //
+  void* py_object,                                                    //
+  DLManagedTensorVersioned** out                                      //
+);
+
+/*!
+ * \brief Exports a PyObject* Tensor/NDArray to a provided DLTensor.
+ *
+ * This function provides a faster interface for temporary, non-owning, 
exchange.
+ * The producer (implementer) still owns the memory of data, strides, shape.
+ * The liveness of the DLTensor and the data it views is only guaranteed until
+ * control is returned.
+ *
+ * This function currently assumes that the producer (implementer) can fill
+ * in the DLTensor shape and strides without the need for temporary 
allocations.
+ *
+ * This function does not perform any stream synchronization. The consumer 
should query
+ * DLPackCurrentWorkStream to get the current work stream and launch kernels 
on it.
+ *
+ * This function is exposed by the framework through the DLPackExchangeAPI.
+ *
+ * \param py_object The Python object to convert. Must have the same type
+ *        as the one the `DLPackExchangeAPI` was discovered from.
+ * \param out The output DLTensor, whose space is pre-allocated on stack.
+ * \return 0 on success, -1 on failure with a Python exception set.
+ * \note - As a C function, must not throw C++ exceptions.
+ *
+ * \sa DLPackExchangeAPI, DLPackCurrentWorkStream
+ */
+typedef int (*DLPackDLTensorFromPyObjectNoSync)(                      //
+  void* py_object,                                                    //
+  DLTensor* out                                                       //
+);
+
+/*!
+ * \brief Obtain the current work stream of a device.
+ *
+ * Obtain the current work stream of a device from the producer framework.
+ * For example, it should map to torch.cuda.current_stream in PyTorch.
+ *
+ * When device_type is kDLCPU, the consumer does not have to query the stream
+ * and the producer can simply return NULL when queried.
+ * The consumer does not have to do anything on stream sync or setting.
+ * So a CPU-only framework can just provide a dummy implementation that
+ * always sets out_current_stream[0] to NULL.
+ *
+ * \param device_type The device type.
+ * \param device_id The device id.
+ * \param out_current_stream The output current work stream.
+ *
+ * \return 0 on success, -1 on failure with a Python exception set.
+ * \note - As a C function, must not throw C++ exceptions.
+ *
+ * \sa DLPackExchangeAPI
+ */
+typedef int (*DLPackCurrentWorkStream)(                         //
+  DLDeviceType device_type,                                     //
+  int32_t device_id,                                            //
+  void** out_current_stream                                     //
+);
+
+/*!
+ * \brief Imports a DLManagedTensorVersioned to a PyObject* Tensor/NDArray.
+ *
+ * Convert an owning DLManagedTensorVersioned* to the Python tensor of the
+ * producer (implementer) library with the correct type.
+ *
+ * This function does not perform any stream synchronization.
+ *
+ * This function is exposed by the framework through the DLPackExchangeAPI.
+ *
+ * \param tensor The DLManagedTensorVersioned to convert; the ownership of
+ *        the tensor is stolen.
+ * \param out_py_object The output Python object.
+ * \return 0 on success, -1 on failure with a Python exception set.
+ *
+ * \sa DLPackExchangeAPI
+ */
+typedef int (*DLPackManagedTensorToPyObjectNoSync)(                //
+  DLManagedTensorVersioned* tensor,                                //
+  void** out_py_object                                             //
+);
+
+/*!
+ * \brief DLPackExchangeAPI stable header.
+ * \sa DLPackExchangeAPI
+ */
+typedef struct DLPackExchangeAPIHeader {
+  /*!
+   * \brief The provided DLPack version the consumer must check major version
+   *        compatibility before using this struct.
+   */
+  DLPackVersion version;
+  /*!
+   * \brief Optional pointer to an older DLPackExchangeAPI in the chain.
+   *
+   * It must be NULL if the framework does not support older versions.
+   * If the current major version is larger than the one supported by the
+   * consumer, the consumer may walk this to find an earlier supported version.
+   *
+   * \sa DLPackExchangeAPI
+   */
+  struct DLPackExchangeAPIHeader* prev_api;
+} DLPackExchangeAPIHeader;
+
+/*!
+ * \brief Framework-specific function pointers table for DLPack exchange.
+ *
+ * Additionally to `__dlpack__()` we define a C function table sharable by
+ * Python implementations via `__dlpack_c_exchange_api__`.
+ *
+ * This attribute must be set on the type as a Python PyCapsule
+ * with name "dlpack_exchange_api".
+ *
+ * A consumer library may use a pattern such as:
+ *
+ * \code
+ *
+ *  PyObject *api_capsule = PyObject_GetAttrString(
+ *    (PyObject *)Py_TYPE(tensor_obj), "__dlpack_c_exchange_api__")
+ *  );
+ *  if (api_capsule == NULL) { goto handle_error; }
+ *  MyDLPackExchangeAPI *api = (MyDLPackExchangeAPI *)PyCapsule_GetPointer(
+ *    api_capsule, "dlpack_exchange_api"
+ *  );
+ *  Py_DECREF(api_capsule);
+ *  if (api == NULL) { goto handle_error; }
+ *
+ * \endcode
+ *
+ * Note that this must be defined on the type. The consumer should look up the
+ * attribute on the type and may cache the result for each unique type.
+ *
+ * The precise API table is given by:
+ * \code
+ * struct MyDLPackExchangeAPI : public DLPackExchangeAPI {
+ *   MyDLPackExchangeAPI() {
+ *     header.version.major = DLPACK_MAJOR_VERSION;
+ *     header.version.minor = DLPACK_MINOR_VERSION;
+ *     header.prev_api = nullptr;
+ *
+ *     managed_tensor_allocator = MyDLPackManagedTensorAllocator;
+ *     managed_tensor_from_py_object_no_sync = 
MyDLPackManagedTensorFromPyObjectNoSync;
+ *     managed_tensor_to_py_object_no_sync = 
MyDLPackManagedTensorToPyObjectNoSync;
+ *     dltensor_from_py_object_no_sync = MyDLPackDLTensorFromPyObjectNoSync;
+ *     current_work_stream = MyDLPackCurrentWorkStream;
+ *  }
+ *
+ *  static const DLPackExchangeAPI* Global() {
+ *     static MyDLPackExchangeAPI inst;
+ *     return &inst;
+ *  }
+ * };
+ * \endcode
+ *
+ * Guidelines for leveraging DLPackExchangeAPI:
+ *
+ * There are generally two kinds of consumer needs for DLPack exchange:
+ * - N0: library support, where consumer.kernel(x, y, z) would like to run a 
kernel
+ *       with the data from x, y, z. The consumer is also expected to run the 
kernel with the same
+ *       stream context as the producer. For example, when x, y, z is 
torch.Tensor,
+ *       consumer should query exchange_api->current_work_stream to get the
+ *       current stream and launch the kernel with the same stream.
+ *       This setup is necessary for no synchronization in kernel launch and 
maximum compatibility
+ *       with CUDA graph capture in the producer.
+ *       This is the desirable behavior for library extension support for 
frameworks like PyTorch.
+ * - N1: data ingestion and retention
+ *
+ * Note that obj.__dlpack__() API should provide useful ways for N1.
+ * The primary focus of the current DLPackExchangeAPI is to enable faster 
exchange N0
+ * with the support of the function pointer current_work_stream.
+ *
+ * Array/Tensor libraries should statically create and initialize this 
structure
+ * then return a pointer to DLPackExchangeAPI as an int value in Tensor/Array.
+ * The DLPackExchangeAPI* must stay alive throughout the lifetime of the 
process.
+ *
+ * One simple way to do so is to create a static instance of DLPackExchangeAPI
+ * within the framework and return a pointer to it. The following code
+ * shows an example to do so in C++. It should also be reasonably easy
+ * to do so in other languages.
+ */
+typedef struct DLPackExchangeAPI {
+  /*!
+   * \brief The header that remains stable across versions.
+   */
+  DLPackExchangeAPIHeader header;
+  /*!
+   * \brief Producer function pointer for DLPackManagedTensorAllocator
+   *        This function must not be NULL.
+   * \sa DLPackManagedTensorAllocator
+   */
+  DLPackManagedTensorAllocator managed_tensor_allocator;
+  /*!
+   * \brief Producer function pointer for DLPackManagedTensorFromPyObjectNoSync
+   *        This function must not be NULL.
+   * \sa DLPackManagedTensorFromPyObjectNoSync
+   */
+  DLPackManagedTensorFromPyObjectNoSync managed_tensor_from_py_object_no_sync;
+  /*!
+   * \brief Producer function pointer for DLPackManagedTensorToPyObjectNoSync
+   *        This function must not be NULL.
+   * \sa DLPackManagedTensorToPyObjectNoSync
+   */
+  DLPackManagedTensorToPyObjectNoSync managed_tensor_to_py_object_no_sync;
+  /*!
+   * \brief Producer function pointer for DLPackDLTensorFromPyObjectNoSync
+   *        This function can be NULL when the producer does not support this 
function.
+   * \sa DLPackDLTensorFromPyObjectNoSync
+   */
+  DLPackDLTensorFromPyObjectNoSync dltensor_from_py_object_no_sync;
+  /*!
+   * \brief Producer function pointer for DLPackCurrentWorkStream
+   *        This function must not be NULL.
+   * \sa DLPackCurrentWorkStream
+   */
+  DLPackCurrentWorkStream current_work_stream;
+} DLPackExchangeAPI;
 
 #ifdef __cplusplus
 }  // DLPACK_EXTERN_C
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-1.1/tests/scripts/task_lint.sh 
new/dlpack-1.3/tests/scripts/task_lint.sh
--- old/dlpack-1.1/tests/scripts/task_lint.sh   2025-03-10 20:12:16.000000000 
+0100
+++ new/dlpack-1.3/tests/scripts/task_lint.sh   2026-01-24 07:37:33.000000000 
+0100
@@ -5,7 +5,7 @@
 
 if [ ! -f bin/lint.py ]; then
     echo "Grab linter ..."
-    wget https://raw.githubusercontent.com/dmlc/dmlc-core/main/scripts/lint.py
+    wget 
https://raw.githubusercontent.com/dmlc/dmlc-core/28c2bfd2a8cf46e632e30d7fec006d3ee4b26b1c/scripts/lint.py
     mv lint.py bin/lint.py
 fi
 

Reply via email to