Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package mvapich2 for openSUSE:Factory checked in at 2022-07-05 12:08:57
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/mvapich2 (Old)
 and      /work/SRC/openSUSE:Factory/.mvapich2.new.1548 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "mvapich2"

Tue Jul  5 12:08:57 2022 rev:31 rq:986553 version:2.3.7

Changes:
--------
--- /work/SRC/openSUSE:Factory/mvapich2/mvapich2.changes        2022-02-17 00:32:14.061415680 +0100
+++ /work/SRC/openSUSE:Factory/.mvapich2.new.1548/mvapich2.changes      2022-07-05 12:09:00.972516744 +0200
@@ -1,0 +2,46 @@
+Wed Jun 29 12:54:33 UTC 2022 - Klaus Kämpf <[email protected]>
+
+- add pass-correct-size-to-snprintf.patch to fix potential buffer
+  overflows (required to make 'sundials' testsuite pass)
+
+- Update to mvapich2 2.3.7
+  * Features and Enhancements (since 2.3.6):
+    - Added support for systems with Rockport's switchless networks
+      * Added automatic architecture detection
+      * Optimized performance for point-to-point operations
+    - Added support for the Cray Slingshot 10 interconnect
+    - Enhanced support for blocking collective offload using
+      Mellanox SHARP
+        * Scatter and Scatterv
+    - Enhanced support for non-blocking collective offload using
+      Mellanox SHARP
+        * Iallreduce, Ibarrier, Ibcast, and Ireduce
+
+  * Bug Fixes (since 2.3.6):
+    - Removed several deprecated functions
+      - Thanks to Honggang Li @RedHat for the report
+    - Fixed a bug where tools like CMake FindMPI would not
+      detect MVAPICH when compiled without Hydra mpiexec
+      - Thanks to Chris Chambreau and Adam Moody @LLNL for the report
+    - Fixed compilation error when building with mpirun and without hydra
+      - Thanks to James Long @University of Illinois for the report
+    - Fixed issue with setting RoCE mode correctly without RDMA_CM.
+      - Thanks to Nicolas Gagnon @Rockport Networks for the report
+    - Fixed an issue on heterogeneous clusters where QP attributes were
+      set incorrectly
+      - Thanks to X-ScaleSolutions for the report and fix
+    - Fixed a memory leak in improbe on the PSM channel
+      - Thanks to Gregory Lee @LLNL and Beichuan Yan @University of Colorado
+        for the report
+    - Added retry logic for PSM connection establishment
+      - Thanks to Gregory Lee @LLNL for the report and X-ScaleSolutions
+        for the patch
+    - Fixed an initialization error when using PSM and gcc's -pg option
+      - Thanks to Gregory Lee @LLNL for the report and X-ScaleSolutions for 
+        the patch
+    - Fixed a potential integer overflow when transferring large arrays
+      - Thanks to Alexander Melnikov for the report and patch  
+
+- Fix Url: link
+
+-------------------------------------------------------------------
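
For context: the overflows addressed by pass-correct-size-to-snprintf.patch
(included below) all follow the same pattern, where a fixed constant such as
_POSIX2_LINE_MAX is passed to snprintf as the size limit instead of the space
actually remaining in the destination buffer. A minimal stand-alone sketch of
the bug class and the fix, using a hypothetical buffer rather than the actual
MVAPICH2 code:

    #include <stdio.h>

    #define WRONG_LIMIT 2048   /* e.g. _POSIX2_LINE_MAX; unrelated to the buffer */

    int main(void)
    {
        char buf[128];         /* real capacity: 128 bytes */
        int j = 0;

        /* Buggy pattern: the limit is larger than the buffer, so long
         * enough output would be written past the end of buf:
         *     j += snprintf(buf + j, WRONG_LIMIT, "%d,", 42);
         */

        /* Fixed pattern: derive the limit from the buffer itself, minus
         * what has already been written, so no single call can write out
         * of bounds. */
        j += snprintf(buf + j, sizeof(buf) - j, "%d,", 42);
        j += snprintf(buf + j, sizeof(buf) - j, ":");

        printf("%s\n", buf);   /* prints "42,:" */
        return 0;
    }

Note that sizeof only yields the capacity because buf is declared as an array
in the same scope; the same holds for the mapping and cpu_str buffers that the
patch below touches.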

Old:
----
  mvapich2-2.3.6.tar.gz

New:
----
  _constraints
  mvapich2-2.3.7-1.tar.gz
  pass-correct-size-to-snprintf.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ mvapich2.spec ++++++
--- /var/tmp/diff_new_pack.tYMGbW/_old  2022-07-05 12:09:02.020518249 +0200
+++ /var/tmp/diff_new_pack.tYMGbW/_new  2022-07-05 12:09:02.028518260 +0200
@@ -19,8 +19,9 @@
 %global flavor @BUILD_FLAVOR@%{nil}
 
 %define pname mvapich2
-%define vers  2.3.6
-%define _vers 2_3_6
+%define vers  2.3.7
+%define _vers 2_3_7
+%define rc_ver -1
 
 %if "%{flavor}" == ""
 ExclusiveArch:  do_not_build
@@ -241,7 +242,7 @@
 Group:          Development/Libraries/Parallel
 Version:        %{vers}
 Release:        0
-Source0:        http://mvapich.cse.ohio-state.edu/download/mvapich/mv2/mvapich2-%{version}.tar.gz
+Source0:        http://mvapich.cse.ohio-state.edu/download/mvapich/mv2/mvapich2-%{version}%{?rc_ver}.tar.gz
 Source1:        mpivars.sh
 Source2:        mpivars.csh
 Source3:        macros.hpc-mvapich2
@@ -252,13 +253,14 @@
 # It's been merged upstream, should be removed with the next release
 Patch3:         0001-Drop-GCC-check.patch
 Patch4:         reproducible.patch
+Patch5:         pass-correct-size-to-snprintf.patch
 
 ## Armv7 specific patches
 # PATCH-FIX-UPSTREAM 0001-Drop-real128.patch (https://github.com/pmodels/mpich/issues/4005)
 Patch50:        0001-Drop-real128.patch
 Patch51:        0001-Drop-Real-16.patch
 
-URL:            http://mvapich.cse.ohio-state.edu/overview/mvapich2/
+URL:            http://mvapich.cse.ohio-state.edu
 BuildRoot:      %{_tmppath}/%{name}-%{version}-build
 
 %if %{without skip_hpc_build}
@@ -389,6 +391,7 @@
 %patch2
 %patch3
 %patch4
+%patch5 -p1
 
 # Only apply these patches on Armv7
 %ifarch armv7hl
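
A side note on the renamed tarball above: %{?rc_ver} is RPM's conditional
macro expansion, which expands to the macro's value when the macro is defined
and to the empty string otherwise. With "%define rc_ver -1" in place, Source0
resolves to mvapich2-2.3.7-1.tar.gz (the name upstream now ships); removing
the define would make the same template yield mvapich2-2.3.7.tar.gz again.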

++++++ _constraints ++++++
<constraints>
  <hardware>
    <processors>8</processors>
    <disk>
      <size unit="G">10</size>
    </disk>
    <physicalmemory>
      <size unit="G">16</size>
    </physicalmemory>
  </hardware>
</constraints>
(No newline at EOF)
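
(_constraints is the Open Build Service file for declaring build-host
requirements; the entry above asks the scheduler for workers with at least
8 processors, 10 GB of disk, and 16 GB of physical memory.)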

++++++ pass-correct-size-to-snprintf.patch ++++++
diff -wruN -x '*~' -x '*.o' -x '*.a' -x '*.so' -x '*.so.[0-9]' -x autom4te.cache -x .deps -x .libs ../orig-mvapich2-2.3.7-1/src/mpid/ch3/channels/common/src/affinity/hwloc_bind.c ./src/mpid/ch3/channels/common/src/affinity/hwloc_bind.c
--- ../orig-mvapich2-2.3.7-1/src/mpid/ch3/channels/common/src/affinity/hwloc_bind.c    2022-05-16 18:58:22.000000000 +0200
+++ ./src/mpid/ch3/channels/common/src/affinity/hwloc_bind.c    2022-06-29 15:07:17.700058168 +0200
@@ -2107,7 +2107,7 @@
         for (i = 0; i < g_smpi.num_local_nodes; ++i) {
             hwloc_bitmap_clr(*free_sock_cpuset, local_core_ids[i]);
         }
-        hwloc_bitmap_snprintf(cpu_str, 128, *free_sock_cpuset);
+        hwloc_bitmap_snprintf(cpu_str, sizeof(cpu_str), *free_sock_cpuset);
         PRINT_DEBUG(DEBUG_INIT_verbose, "Free sock_cpuset = %s\n", cpu_str);
     }
 
@@ -3190,11 +3190,11 @@
         for (i = 0; i < local_procs; i++) {
             curr = count;
             for (k = 0; k < num_app_threads; k++) {
-                j += snprintf (mapping+j, _POSIX2_LINE_MAX, "%d,", mv2_core_map[curr]);
+                j += snprintf (mapping+j, sizeof(mapping)-j, "%d,", mv2_core_map[curr]);
                 curr = (curr + 1) % num_pu;
             }
             mapping [--j] = '\0'; 
-            j += snprintf (mapping+j, _POSIX2_LINE_MAX, ":");
+            j += snprintf (mapping+j, sizeof(mapping)-j, ":");
             count = (count + hw_threads_per_core) % num_pu;
         }
     } else if (mv2_hybrid_binding_policy == HYBRID_LINEAR) {
@@ -3203,14 +3203,14 @@
          * resources  */
         for (i = 0; i < local_procs; i++) {
             for (k = 0; k < num_app_threads; k++) {
-                j += snprintf (mapping+j, _POSIX2_LINE_MAX, "%d,", mv2_core_map[curr]);
+                j += snprintf (mapping+j, sizeof(mapping)-j, "%d,", mv2_core_map[curr]);
 
                 curr = ((curr + hw_threads_per_core) >= num_pu) ?
                             ((curr + hw_threads_per_core+ ++step) % num_pu) :
                             (curr + hw_threads_per_core) % num_pu;
             }
             mapping [--j] = '\0';
-            j += snprintf (mapping+j, _POSIX2_LINE_MAX, ":");
+            j += snprintf (mapping+j, sizeof(mapping)-j, ":");
         }    
     } else if (mv2_hybrid_binding_policy == HYBRID_SPREAD) {
 #if defined(CHANNEL_MRAIL)
@@ -3232,12 +3232,12 @@
             for (i = 0; i < local_procs; i++) {
                  for (k = curr; k < curr+chunk; k++) {
                      for (l = 0; l < hw_threads_per_core; l++) {
-                        j += snprintf (mapping+j, _POSIX2_LINE_MAX, "%d,", 
+                        j += snprintf (mapping+j, sizeof(mapping)-j, "%d,", 
                                 mv2_core_map[k * hw_threads_per_core + l]);
                      }
                  }
                  mapping [--j] = '\0';
-                 j += snprintf (mapping+j, _POSIX2_LINE_MAX, ":");
+                 j += snprintf (mapping+j, sizeof(mapping)-j, ":");
                  curr = (curr + chunk) % size;
             } 
         } else {
@@ -3252,11 +3252,11 @@
             for (i = 0; i < num_sockets; i++) {
                 for (k = curr; k < curr+ranks_per_sock; k++) {
                     for (l = 0; l < hw_threads_per_core; l++) {
-                        j += snprintf (mapping+j, _POSIX2_LINE_MAX, "%d,",
+                        j += snprintf (mapping+j, sizeof(mapping)-j, "%d,",
                                 mv2_core_map[k * hw_threads_per_core + l]);
                     }
                     mapping [--j] = '\0';
-                    j += snprintf (mapping+j, _POSIX2_LINE_MAX, ":");
+                    j += snprintf (mapping+j, sizeof(mapping)-j, ":");
                 }
                curr = (curr + ((num_pu_per_socket/hw_threads_per_core)  * chunk)) % size;
             }
@@ -3265,7 +3265,7 @@
         /* Bunch mapping: Bind each MPI rank to a single physical core of first
          * socket followed by second socket */
         for (i = 0; i < local_procs; i++) {
-            j += snprintf (mapping+j, _POSIX2_LINE_MAX, "%d:", mv2_core_map[k]);
+            j += snprintf (mapping+j, sizeof(mapping)-j, "%d:", mv2_core_map[k]);
             k = (k + hw_threads_per_core) % size;
         } 
     } else if (mv2_hybrid_binding_policy == HYBRID_SCATTER) {
@@ -3283,7 +3283,7 @@
             return MPI_ERR_OTHER;
         }
         for (i = 0; i < local_procs; i++) {
-            j += snprintf (mapping+j, _POSIX2_LINE_MAX, "%d:", mv2_core_map[k]);
+            j += snprintf (mapping+j, sizeof(mapping)-j, "%d:", mv2_core_map[k]);
             k = (i % num_sockets == 0) ?
                     (k + num_pu_per_socket) % size :
                     (k + num_pu_per_socket + hw_threads_per_core) % size;
@@ -3315,10 +3315,10 @@
         /* NUMA mapping: Bind consecutive MPI ranks to different NUMA domains in
          * round-robin fashion. */
         for (i = 0; i < local_procs; i++) {
-            j += snprintf (mapping+j, _POSIX2_LINE_MAX, "%d,", 
+            j += snprintf (mapping+j, sizeof(mapping)-j, "%d,", 
                                mv2_core_map_per_numa[node_base_pu+node_offset]);
             mapping [--j] = '\0';
-            j += snprintf (mapping+j, _POSIX2_LINE_MAX, ":");
+            j += snprintf (mapping+j, sizeof(mapping)-j, ":");
             node_base_pu = (node_base_pu + num_pu_per_numanode) % size;
             node_offset = (node_base_pu == 0) ? 
                            (node_offset + ((hw_threads_per_core > 0) ? hw_threads_per_core : 1)) : 
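
One caveat on the cumulative pattern in the patch above: C99 snprintf returns
the length the output would have had without truncation, so after a truncated
write j can exceed sizeof(mapping), and the unsigned expression
sizeof(mapping)-j would wrap around to a huge value. A defensive variant (a
sketch of that idea, not part of the shipped patch) checks the offset first:

    if (j < (int) sizeof (mapping))
        j += snprintf (mapping + j, sizeof (mapping) - j, "%d,", mv2_core_map[curr]);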
