Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package libsvm for openSUSE:Factory checked 
in at 2026-01-01 15:01:00
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/libsvm (Old)
 and      /work/SRC/openSUSE:Factory/.libsvm.new.1928 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "libsvm"

Thu Jan  1 15:01:00 2026 rev:7 rq:1324936 version:3.36

Changes:
--------
--- /work/SRC/openSUSE:Factory/libsvm/libsvm.changes    2024-09-27 
17:11:58.127845793 +0200
+++ /work/SRC/openSUSE:Factory/.libsvm.new.1928/libsvm.changes  2026-01-01 
15:02:07.517626500 +0100
@@ -1,0 +2,8 @@
+Sun Dec 28 23:29:56 UTC 2025 - Dirk Müller <[email protected]>
+
+- update to 3.36:
+  * enhanced Python interface
+  * probabilistic outputs for one-class SVM are now supported
+- drop libsvm-java8.patch (Java 11 is now the default)
+
+-------------------------------------------------------------------

Old:
----
  libsvm-3.3.tar.gz
  libsvm-java8.patch

New:
----
  libsvm-3.36.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ libsvm.spec ++++++
--- /var/tmp/diff_new_pack.iPQhAj/_old  2026-01-01 15:02:08.073649304 +0100
+++ /var/tmp/diff_new_pack.iPQhAj/_new  2026-01-01 15:02:08.077649468 +0100
@@ -1,7 +1,7 @@
 #
 # spec file for package libsvm
 #
-# Copyright (c) 2024 SUSE LLC
+# Copyright (c) 2025 SUSE LLC and contributors
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -16,18 +16,17 @@
 #
 
 
-%define libname libsvm3
-%define fileversion 3.3
+%define libname libsvm4
+%define fileversion 3.36
 
 Summary:        A Library for Support Vector Machines
 License:        BSD-3-Clause
 Group:          Development/Languages/Other
 Name:           libsvm
-Version:        3.30
+Version:        3.36
 Release:        0
 URL:            https://www.csie.ntu.edu.tw/~cjlin/libsvm/
 Source0:        
https://www.csie.ntu.edu.tw/~cjlin/libsvm/%{name}-%{fileversion}.tar.gz
-Patch0:         libsvm-java8.patch
 Patch1:         libsvm-reproducible-jar-mtime.patch
 BuildRequires:  %{python_module devel}
 BuildRequires:  gcc-c++
@@ -97,7 +96,6 @@
 
 %prep
 %setup -n %{name}-%{fileversion}
-%patch -P 0 -p1
 # The "--date" option was added into jar in OpenJDK 17
 %if %{?pkg_vcmp:%pkg_vcmp java-devel >= 17}%{!?pkg_vcmp:0}
 %patch -P 1 -p1
@@ -128,8 +126,8 @@
 install -m 755 ./tools/subset.py %{buildroot}%{_bindir}/svm-subset
 install -m 755 ./tools/easy.py %{buildroot}%{_bindir}/svm-easy
 install -m 644 svm.h %{buildroot}%{_includedir}/libsvm/
-install -m 755 libsvm.so.3 %{buildroot}%{_libdir}
-ln -s %{_libdir}/libsvm.so.3 %{buildroot}%{_libdir}/libsvm.so
+install -m 755 libsvm.so.4 %{buildroot}%{_libdir}
+ln -s %{_libdir}/libsvm.so.4 %{buildroot}%{_libdir}/libsvm.so
 mv ./python/README README-python
 mv ./tools/README README-python-tools
 
@@ -156,7 +154,7 @@
 
 %files -n %{libname}
 %license COPYRIGHT
-%{_libdir}/libsvm.so.3
+%{_libdir}/libsvm.so.4
 
 %files devel
 %{_includedir}/libsvm
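
A note on the soname bump in the spec above: anything downstream that loads the
versioned library directly has to move from libsvm.so.3 to libsvm.so.4. A minimal
ctypes sketch, assuming the library is on the default loader search path; the
check uses the libsvm_version symbol that svm.h exports:

    from ctypes import CDLL, c_int

    # The libsvm4 subpackage now ships libsvm.so.4; libsvm.so.3 is gone.
    libsvm = CDLL("libsvm.so.4")

    # svm.h declares `extern int libsvm_version`, so the loaded build can be
    # verified directly; for this release it should read 336.
    print(c_int.in_dll(libsvm, "libsvm_version").value)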

++++++ libsvm-3.3.tar.gz -> libsvm-3.36.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/.github/workflows/wheel.yml 
new/libsvm-3.36/.github/workflows/wheel.yml
--- old/libsvm-3.3/.github/workflows/wheel.yml  2022-08-10 16:06:18.000000000 
+0200
+++ new/libsvm-3.36/.github/workflows/wheel.yml 2025-05-12 06:29:48.000000000 
+0200
@@ -4,7 +4,7 @@
   # on new tag
   push:
     tags:
-      - '*'
+      - "*"
 
   # manually trigger
   workflow_dispatch:
@@ -15,26 +15,31 @@
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-         os: [windows-2019]
+        os: [windows-2022, macos-13]
 
     steps:
       - uses: actions/checkout@v2
 
-      # Used to host cibuildwheel
-      - uses: actions/setup-python@v2
-
-      - name: Install cibuildwheel
-        run: python -m pip install cibuildwheel==2.3.1
+      - name: Set MacOS compiler
+        if: runner.os == 'macOS'
+        run: |
+          brew install gcc@13;
+          echo "CXX=gcc-13" >> $GITHUB_ENV
 
       - name: Build wheels
-        working-directory: ./python
-        run: python -m cibuildwheel --output-dir wheelhouse
+        uses: pypa/[email protected]
         env:
           # don't build for PyPython and windows 32-bit
           CIBW_SKIP: pp* *win32*
+          # force compiler on macOS
+          CXX: ${{ env.CXX }}
+          CC: ${{ env.CXX }}
+        with:
+          package-dir: ./python
+          output-dir: ./python/wheelhouse
 
       - name: Upload a Build Artifact
-        uses: actions/[email protected]
+        uses: actions/upload-artifact@v4
         with:
+          name: wheels-${{ matrix.os }}
           path: ./python/wheelhouse
-
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/COPYRIGHT new/libsvm-3.36/COPYRIGHT
--- old/libsvm-3.3/COPYRIGHT    2022-08-10 16:06:18.000000000 +0200
+++ new/libsvm-3.36/COPYRIGHT   2025-05-12 06:29:48.000000000 +0200
@@ -1,5 +1,5 @@
 
-Copyright (c) 2000-2022 Chih-Chung Chang and Chih-Jen Lin
+Copyright (c) 2000-2023 Chih-Chung Chang and Chih-Jen Lin
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/FAQ.html new/libsvm-3.36/FAQ.html
--- old/libsvm-3.3/FAQ.html     2022-08-10 16:06:18.000000000 +0200
+++ new/libsvm-3.36/FAQ.html    2025-05-12 06:29:48.000000000 +0200
@@ -9,7 +9,7 @@
 <a name="_TOP"><b><h1><a
 href=http://www.csie.ntu.edu.tw/~cjlin/libsvm>LIBSVM</a>  FAQ </h1></b></a>
 <b>last modified : </b>
-Sat, 12 Mar 2022 22:30:54 GMT
+Sun,  4 Sep 2022 12:32:38 GMT
 <class="categories">
 <li><a
 href="#_TOP">All Questions</a>(84)</li>
@@ -86,7 +86,7 @@
 <li class="headlines_item"><a href="#f427">Why the code gives NaN (not a 
number) results?</a></li>
 <li class="headlines_item"><a href="#f430">Why the sign of predicted labels 
and decision values are sometimes reversed?</a></li>
 <li class="headlines_item"><a href="#f431">I don't know class labels of test 
data. What should I put in the first column of the test file?</a></li>
-<li class="headlines_item"><a href="#f432">How can I use OpenMP to parallelize 
LIBSVM on a multicore/shared-memory computer?</a></li>
+<li class="headlines_item"><a href="#f432">How can I use OpenMP, tbb, or other 
tools to parallelize LIBSVM on a multicore/shared-memory computer?</a></li>
 <li class="headlines_item"><a href="#f433">How could I know which training 
instances are support vectors?</a></li>
 <li class="headlines_item"><a href="#f434">Why sv_indices (indices of support 
vectors) are not stored in the saved model file?</a></li>
 <li class="headlines_item"><a href="#f501">After doing cross validation, why 
there is no model file outputted ?</a></li>
@@ -1241,14 +1241,15 @@
 <a href="#_TOP">[Go Top]</a>  
 <hr/>
   <a name="/Q04:_Training_and_prediction"></a>
-<a name="f432"><b>Q: How can I use OpenMP to parallelize LIBSVM on a 
multicore/shared-memory computer?</b></a>
+<a name="f432"><b>Q: How can I use OpenMP, tbb, or other tools to parallelize 
LIBSVM on a multicore/shared-memory computer?</b></a>
 <br/>                                                                          
      
 
-<p>It is very easy if you are using GCC 4.2
-or after. 
-
-<p> In Makefile, add -fopenmp  to CFLAGS.
+<p>It is very easy. Consider OpenMP. For GCC, version 4.2 or after is needed.
 
+<p> In Makefile, add -fopenmp  to CFLAGS. In the beginning of svm.cpp, add
+<pre>
+#include &lt;omp.h&gt;
+</pre>
 <p> In class SVC_Q of svm.cpp, modify the for loop
 of get_Q to:
 <pre>
@@ -1267,7 +1268,7 @@
 a reduction clause for the variable sum:
 <pre>
 #pragma omp parallel for private(i) reduction(+:sum) schedule(guided)
-               for(i=0;i&lt;model->l;i++)
+               for(i=0;i&lt;model-&gt;l;i++)
                        sum += sv_coef[i] * 
Kernel::k_function(x,model-&gt;SV[i],model-&gt;param);
 </pre>
 
@@ -1293,13 +1294,96 @@
 </pre>
 instead.
 
-<p> For Python interface, you need to add the -lgomp link option:
+<p> For Python interface, you need to add the -fopenmp link option:
 <pre>
-$(CXX) -lgomp -shared -dynamiclib svm.o -o libsvm.so.$(SHVER)
+if [ "$(OS)" = "Darwin" ]; then \
+       SHARED_LIB_FLAG="-dynamiclib -Wl,-install_name,libsvm.so.$(SHVER) 
-fopenmp"; \
+else \
+       SHARED_LIB_FLAG="-shared -Wl,-soname,libsvm.so.$(SHVER) -fopenmp"; \
+fi; \
 </pre>
 
 <p> For MS Windows, you need to add /openmp in CFLAGS of Makefile.win
 
+<!-- <p>To use OpenMP, check that your compiler supports it. For GCC version 
4.2 or later is needed.
+
+<p>In Makefile, there are the lines
+<pre>
+# Uncomment the following lines to enable parallelization with OpenMP
+# CFLAGS += -fopenmp
+# SHARED_LIB_FLAG += -fopenmp
+</pre>
+Uncomment the latter two lines then rebuild the package to use OpenMP.
+
+<p>Kernel evaluations in training/testing will be parallelized.
+An example of running this modification on an 8-core machine using the data set
+<a href=../libsvmtools/datasets/binary/real-sim.bz2>real-sim</a>:
+
+<p> 8 cores:
+<pre>
+%setenv OMP_NUM_THREADS 8
+%time svm-train -c 8 -g 0.5 -m 1000 real-sim
+175.90sec
+</pre>
+1 core:
+<pre>
+%setenv OMP_NUM_THREADS 1
+%time svm-train -c 8 -g 0.5 -m 1000 real-sim
+588.89sec
+</pre>
+For this data, kernel evaluations take 91% of training time. In the above 
example, we assume you use csh. For bash, use
+<pre>
+export OMP_NUM_THREADS=8
+</pre>
+instead.
+
+<p>For Windows, in Makefile.win there are the lines
+<pre>
+# Uncomment the following lines to enable parallelization with OpenMP
+# CFLAGS = /nologo /O2 /EHsc /I. /D _WIN64 /D _CRT_SECURE_NO_DEPRECATE /openmp
+</pre>
+Uncomment the latter line and rebuild the package to use OpenMP. -->
+
+<p> Now consider tbb (thread building blocks). Thank Rob Ward for the 
following instructions. First the tbb package must be installed. On Ubuntu this 
can be done by
+<pre>
+apt-get install -y libtbb-dev
+</pre>
+In svm.cpp, add
+<pre>
+#include &lt;atomic&gt;
+#include &lt;execution&gt;
+#include &quot;tbb/iterators.h&quot;
+</pre>
+in the beginning. Then the loops mentioned earlier should be as
+follows. In class SVC_Q of svm.cpp,
+<pre>
+std::for_each(std::execution::par, tbb::counting_iterator&lt;int&gt;(start), 
tbb::counting_iterator&lt;int&gt;(len),
+       [this, i, data](int j){
+               data[j] = (Qfloat)(y[i] * y[j] * (this-&gt;*kernel_function)(i, 
j));
+       });
+</pre>
+In class SVR_Q of svm.cpp,
+<pre>
+std::for_each(std::execution::par, tbb::counting_iterator&lt;int&gt;(0), 
tbb::counting_iterator&lt;int&gt;(l),
+       [this, i, data, real_i](int j){
+               data[j] = (Qfloat)(this-&gt;*kernel_function)(real_i, j);
+       });
+</pre>
+In svm_predict_values() of svm.cpp, for SVR and ONE_CLASS,
+<pre>
+std::atomic&lt;double&gt; sum = 0;
+std::for_each(std::execution::par, tbb::counting_iterator&lt;int&gt;(0), 
tbb::counting_iterator&lt;int&gt;(model-&gt;l),
+       [&amp;sum, sv_coef, x, model](int i){
+               sum += sv_coef[i] * Kernel::k_function(x, model-&gt;SV[i], 
model-&gt;param);
+       });
+</pre>
+and for SVC
+<pre>
+std::for_each(std::execution::par, tbb::counting_iterator&lt;int&gt;(0), 
tbb::counting_iterator&lt;int&gt;(l),
+       [kvalue, x, model](int i){
+               kvalue[i] = Kernel::k_function(x, model-&gt;SV[i], 
model-&gt;param);
+       });
+</pre>
 <p align="right">
 <a href="#_TOP">[Go Top]</a>  
 <hr/>
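
The FAQ text above covers OpenMP at the C++ level; from the Python side the
thread count is controlled through OMP_NUM_THREADS, and it only takes effect if
the library was actually built with -fopenmp (as the updated python/setup.py in
this tarball now does). A rough sketch, with the data file and thread count
purely illustrative:

    import os

    # Fix the thread count before the OpenMP runtime inside libsvm initializes,
    # i.e. before the shared library is loaded.
    os.environ["OMP_NUM_THREADS"] = "8"

    from libsvm.svmutil import svm_read_problem, svm_train

    # heart_scale is the sample data set shipped in the LIBSVM tarball; any
    # LIBSVM-format file works the same way.
    y, x = svm_read_problem("heart_scale")
    model = svm_train(y, x, "-c 8 -g 0.5 -m 1000")
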
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/Makefile new/libsvm-3.36/Makefile
--- old/libsvm-3.3/Makefile     2022-08-10 16:06:19.000000000 +0200
+++ new/libsvm-3.36/Makefile    2025-05-12 06:29:48.000000000 +0200
@@ -1,18 +1,21 @@
 CXX ?= g++
 CFLAGS = -Wall -Wconversion -O3 -fPIC
-SHVER = 3
+SHVER = 4
 OS = $(shell uname)
+ifeq ($(OS),Darwin)
+       SHARED_LIB_FLAG = -dynamiclib -Wl,-install_name,libsvm.so.$(SHVER)
+else
+       SHARED_LIB_FLAG = -shared -Wl,-soname,libsvm.so.$(SHVER)
+endif
+
+# Uncomment the following lines to enable parallelization with OpenMP
+# CFLAGS += -fopenmp
+# SHARED_LIB_FLAG += -fopenmp
 
 all: svm-train svm-predict svm-scale
 
 lib: svm.o
-       if [ "$(OS)" = "Darwin" ]; then \
-               SHARED_LIB_FLAG="-dynamiclib 
-Wl,-install_name,libsvm.so.$(SHVER)"; \
-       else \
-               SHARED_LIB_FLAG="-shared -Wl,-soname,libsvm.so.$(SHVER)"; \
-       fi; \
-       $(CXX) $${SHARED_LIB_FLAG} svm.o -o libsvm.so.$(SHVER)
-
+       $(CXX) $(SHARED_LIB_FLAG) svm.o -o libsvm.so.$(SHVER)
 svm-predict: svm-predict.c svm.o
        $(CXX) $(CFLAGS) svm-predict.c svm.o -o svm-predict -lm
 svm-train: svm-train.c svm.o
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/Makefile.win new/libsvm-3.36/Makefile.win
--- old/libsvm-3.3/Makefile.win 2022-08-10 16:06:20.000000000 +0200
+++ new/libsvm-3.36/Makefile.win        2025-05-12 06:29:48.000000000 +0200
@@ -8,6 +8,9 @@
 CFLAGS = /nologo /O2 /EHsc /I. /D _WIN64 /D _CRT_SECURE_NO_DEPRECATE
 TARGET = windows
 
+# Uncomment the following lines to enable parallelization with OpenMP
+# CFLAGS = /nologo /O2 /EHsc /I. /D _WIN64 /D _CRT_SECURE_NO_DEPRECATE /openmp
+
 all: $(TARGET)\svm-train.exe $(TARGET)\svm-predict.exe $(TARGET)\svm-scale.exe 
$(TARGET)\svm-toy.exe lib
 
 $(TARGET)\svm-predict.exe: svm.h svm-predict.c svm.obj
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/README new/libsvm-3.36/README
--- old/libsvm-3.3/README       2022-08-10 16:06:20.000000000 +0200
+++ new/libsvm-3.36/README      2025-05-12 06:29:51.000000000 +0200
@@ -221,7 +221,7 @@
 
 > svm-train -s 0 -c 5 -t 2 -g 0.5 -e 0.1 data_file
 
-Train a classifier with RBF kernel exp(-0.5|u-v|^2), C=10, and
+Train a classifier with RBF kernel exp(-0.5|u-v|^2), C=5, and
 stopping tolerance 0.1.
 
 > svm-train -s 3 -p 0.1 -t 0 data_file
@@ -471,7 +471,10 @@
 
     param describes the parameters used to obtain the model.
 
-    nr_class is the number of classes. It is 2 for regression and one-class 
SVM.
+    nr_class is the number of classes for classification. It is a
+    non-negative integer with special cases of 0 (no training data at
+    all) and 1 (all training data in one class). For regression and
+    one-class SVM, nr_class = 2.
 
     l is the number of support vectors. SV and sv_coef are support
     vectors and the corresponding coefficients, respectively. Assume there are
@@ -664,6 +667,27 @@
         svm_set_print_string_function(NULL);
     for default printing to stdout.
 
+    Please note that this function is not thread-safe. When multiple threads 
load or
+    use the same dynamic library (for example, libsvm.so.4), they actually 
share the
+    same memory space of the dynamic library, which results in all threads 
modifying
+    the same static function pointer, svm_print_string, in svm.cpp when they 
call this
+    function.
+
+    For example, suppose we have threads A and B. They call this function 
sequentially
+    and pass their own thread-local print_func into it. After that, they both 
call (*svm_print_string)(str)
+    once. When the last thread finishes setting it (say B), svm_print_string 
is set to
+    B.print_func. Now, if thread A wants to access svm_print_string, it is 
actually
+    accessing B.print_func rather than A.print_func, which is incorrect since 
we expect
+    to use the functionality of A.print_func.
+
+    Even if A.print_func and B.print_func have identical functionality, it is 
still risky.
+    Suppose svm_print_string is now set to B.print_func, and B deletes 
B.print_func after
+    finishing its work. Later, thread A calls svm_print_string, but the 
address it points to,
+    which is B.print_func, has already been deleted. This invalid memory 
access will crash
+    the program. To mitigate this issue, in this example, you should ensure 
that A.print_func
+    and B.print_func remain valid after threads finish their work. For 
example, in Python,
+    you can assign them as global variables.
+
 Java Version
 ============
 
@@ -683,7 +707,7 @@
 Library usages are similar to the C version. These functions are available:
 
 public class svm {
-       public static final int LIBSVM_VERSION=330;
+       public static final int LIBSVM_VERSION=336;
        public static svm_model svm_train(svm_problem prob, svm_parameter 
param);
        public static void svm_cross_validation(svm_problem prob, svm_parameter 
param, int nr_fold, double[] target);
        public static int svm_get_svm_type(svm_model model);
@@ -716,6 +740,9 @@
        };
        svm.svm_set_print_string_function(your_print_func);
 
+However, similar to the C version, it is not thread-safe. Please check the
+usage of C version svm_set_print_string_function() for details.
+
 Building Windows Binaries
 =========================
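
The thread-safety caveat added to the README above boils down to keeping the
print callback alive for as long as the library might call it. A small ctypes
sketch of the pattern the README suggests (module-level reference; the soname
and callback name are illustrative, matching what this package installs):

    from ctypes import CDLL, CFUNCTYPE, c_char_p

    PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)

    def quiet(s):
        return  # swallow all training output

    # Keep the callback at module level: if Python garbage-collects it while
    # the library's static svm_print_string still points at it, the next print
    # from any thread dereferences freed memory.
    QUIET_CALLBACK = PRINT_STRING_FUN(quiet)

    libsvm = CDLL("libsvm.so.4")
    libsvm.svm_set_print_string_function(QUIET_CALLBACK)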
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/java/Makefile 
new/libsvm-3.36/java/Makefile
--- old/libsvm-3.3/java/Makefile        2022-08-10 16:06:21.000000000 +0200
+++ new/libsvm-3.36/java/Makefile       2025-05-12 06:29:48.000000000 +0200
@@ -5,7 +5,7 @@
                svm_train.class svm_predict.class svm_toy.class svm_scale.class
 
 #JAVAC = jikes
-JAVAC_FLAGS = -target 1.7 -source 1.7
+JAVAC_FLAGS = --release 11
 JAVAC = javac
 # JAVAC_FLAGS =
 export CLASSPATH := .:$(CLASSPATH)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/java/libsvm/svm.java 
new/libsvm-3.36/java/libsvm/svm.java
--- old/libsvm-3.3/java/libsvm/svm.java 2022-08-10 16:07:05.000000000 +0200
+++ new/libsvm-3.36/java/libsvm/svm.java        2025-05-12 06:29:48.000000000 
+0200
@@ -1295,7 +1295,7 @@
        //
        // construct and solve various formulations
        //
-       public static final int LIBSVM_VERSION=330;
+       public static final int LIBSVM_VERSION=336;
        public static final Random rand = new Random();
 
        private static svm_print_interface svm_print_stdout = new 
svm_print_interface()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/java/libsvm/svm.m4 
new/libsvm-3.36/java/libsvm/svm.m4
--- old/libsvm-3.3/java/libsvm/svm.m4   2022-08-10 16:06:22.000000000 +0200
+++ new/libsvm-3.36/java/libsvm/svm.m4  2025-05-12 06:29:48.000000000 +0200
@@ -1295,7 +1295,7 @@
        //
        // construct and solve various formulations
        //
-       public static final int LIBSVM_VERSION=330;
+       public static final int LIBSVM_VERSION=336;
        public static final Random rand = new Random();
 
        private static svm_print_interface svm_print_stdout = new 
svm_print_interface()
Binary files old/libsvm-3.3/java/libsvm.jar and new/libsvm-3.36/java/libsvm.jar 
differ
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/java/svm_toy.java 
new/libsvm-3.36/java/svm_toy.java
--- old/libsvm-3.3/java/svm_toy.java    2022-08-10 16:06:24.000000000 +0200
+++ new/libsvm-3.36/java/svm_toy.java   2025-05-12 06:29:49.000000000 +0200
@@ -1,11 +1,15 @@
 import libsvm.*;
-import java.applet.*;
 import java.awt.*;
 import java.util.*;
 import java.awt.event.*;
 import java.io.*;
 
-public class svm_toy extends Applet {
+public class svm_toy {
+    public static void main(String[] args) {
+        svm_toy_frame frame = new svm_toy_frame("svm_toy", 500, 500+50);
+       }
+}
+class svm_toy_frame extends Frame {
 
        static final String DEFAULT_PARAM="-t 2 -c 100";
        int XLEN;
@@ -43,10 +47,24 @@
        Vector<point> point_list = new Vector<point>();
        byte current_value = 1;
 
-       public void init()
+       svm_toy_frame(String title, int width, int height)
        {
-               setSize(getSize());
+               super(title);
+               this.addWindowListener(new WindowAdapter() {
+                       public void windowClosing(WindowEvent e) {
+                               System.exit(0);
+                       }
+               });
+               this.init();
+               this.setSize(width, height);
+               XLEN = width;
+               YLEN = height-50;
+               this.clear_all();
+               this.setVisible(true);
+       }
 
+       void init()
+       {
                final Button button_change = new Button("Change");
                Button button_run = new Button("Run");
                Button button_clear = new Button("Clear");
@@ -466,37 +484,4 @@
                }
                g.drawImage(buffer,0,0,this);
        }
-
-       public Dimension getPreferredSize() { return new 
Dimension(XLEN,YLEN+50); }
-
-       public void setSize(Dimension d) { setSize(d.width,d.height); }
-       public void setSize(int w,int h) {
-               super.setSize(w,h);
-               XLEN = w;
-               YLEN = h-50;
-               clear_all();
-       }
-
-       public static void main(String[] argv)
-       {
-               new AppletFrame("svm_toy",new svm_toy(),500,500+50);
-       }
-}
-
-class AppletFrame extends Frame {
-       AppletFrame(String title, Applet applet, int width, int height)
-       {
-               super(title);
-               this.addWindowListener(new WindowAdapter() {
-                       public void windowClosing(WindowEvent e) {
-                               System.exit(0);
-                       }
-               });
-               applet.init();
-               applet.setSize(width,height);
-               applet.start();
-               this.add(applet);
-               this.pack();
-               this.setVisible(true);
-       }
-}
+}
\ No newline at end of file
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/java/test_applet.html 
new/libsvm-3.36/java/test_applet.html
--- old/libsvm-3.3/java/test_applet.html        2022-08-10 16:06:24.000000000 
+0200
+++ new/libsvm-3.36/java/test_applet.html       1970-01-01 01:00:00.000000000 
+0100
@@ -1 +0,0 @@
-<APPLET code="svm_toy.class" archive="libsvm.jar" width=300 
height=350></APPLET>
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/matlab/libsvmread.c 
new/libsvm-3.36/matlab/libsvmread.c
--- old/libsvm-3.3/matlab/libsvmread.c  2022-08-10 16:06:25.000000000 +0200
+++ new/libsvm-3.36/matlab/libsvmread.c 2025-05-12 06:29:49.000000000 +0200
@@ -188,7 +188,9 @@
 void mexFunction( int nlhs, mxArray *plhs[],
                int nrhs, const mxArray *prhs[] )
 {
-       char filename[256];
+       #define filename_size 256
+
+       char filename[filename_size];
 
        if(nrhs != 1 || nlhs != 2)
        {
@@ -197,11 +199,9 @@
                return;
        }
 
-       mxGetString(prhs[0], filename, mxGetN(prhs[0]) + 1);
-
-       if(filename == NULL)
-       {
-               mexPrintf("Error: filename is NULL\n");
+       if(mxGetString(prhs[0], filename, filename_size) == 1){
+               mexPrintf("Error: wrong or too long filename\n");
+               fake_answer(nlhs, plhs);
                return;
        }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/python/README 
new/libsvm-3.36/python/README
--- old/libsvm-3.3/python/README        2022-08-10 16:06:26.000000000 +0200
+++ new/libsvm-3.36/python/README       2025-05-12 06:29:49.000000000 +0200
@@ -348,7 +348,7 @@
 The above command loads
     svm_train()            : train an SVM model
     svm_predict()          : predict testing data
-    svm_read_problem()     : read the data from a LIBSVM-format file.
+    svm_read_problem()     : read the data from a LIBSVM-format file or object.
     svm_load_model()       : load a LIBSVM model.
     svm_save_model()       : save model to a file.
     evaluations()          : evaluate prediction results.
@@ -448,6 +448,8 @@
     See the usage by examples:
 
     >>> y, x = svm_read_problem('data.txt')
+    >>> with open('data.txt') as f:
+    >>>     y, x = svm_read_problem(f)
     >>> m = svm_load_model('model_file')
     >>> svm_save_model('model_file', m)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/python/libsvm/commonutil.py 
new/libsvm-3.36/python/libsvm/commonutil.py
--- old/libsvm-3.3/python/libsvm/commonutil.py  2022-08-10 16:06:27.000000000 
+0200
+++ new/libsvm-3.36/python/libsvm/commonutil.py 2025-05-12 06:29:49.000000000 
+0200
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 from __future__ import print_function
 from array import array
 import sys
@@ -14,12 +12,12 @@
 
 __all__ = ['svm_read_problem', 'evaluations', 'csr_find_scale_param', 
'csr_scale']
 
-def svm_read_problem(data_file_name, return_scipy=False):
+def svm_read_problem(data_source, return_scipy=False):
     """
-    svm_read_problem(data_file_name, return_scipy=False) -> [y, x], y: list, 
x: list of dictionary
-    svm_read_problem(data_file_name, return_scipy=True)  -> [y, x], y: 
ndarray, x: csr_matrix
+    svm_read_problem(data_source, return_scipy=False) -> [y, x], y: list, x: 
list of dictionary
+    svm_read_problem(data_source, return_scipy=True)  -> [y, x], y: ndarray, 
x: csr_matrix
 
-    Read LIBSVM-format data from data_file_name and return labels y
+    Read LIBSVM-format data from data_source and return labels y
     and data instances x.
     """
     if scipy != None and return_scipy:
@@ -33,30 +31,43 @@
         row_ptr = [0]
         col_idx = []
     indx_start = 1
-    for i, line in enumerate(open(data_file_name)):
-        line = line.split(None, 1)
-        # In case an instance with all zero features
-        if len(line) == 1: line += ['']
-        label, features = line
-        prob_y.append(float(label))
-        if scipy != None and return_scipy:
-            nz = 0
-            for e in features.split():
-                ind, val = e.split(":")
-                if ind == '0':
-                    indx_start = 0
-                val = float(val)
-                if val != 0:
-                    col_idx.append(int(ind)-indx_start)
-                    prob_x.append(val)
-                    nz += 1
-            row_ptr.append(row_ptr[-1]+nz)
-        else:
-            xi = {}
-            for e in features.split():
-                ind, val = e.split(":")
-                xi[int(ind)] = float(val)
-            prob_x += [xi]
+
+    if hasattr(data_source, "read"):
+        file = data_source
+    else:
+        file = open(data_source)
+    try:
+        for line in file:
+            line = line.split(None, 1)
+            # In case an instance with all zero features
+            if len(line) == 1: line += ['']
+            label, features = line
+            prob_y.append(float(label))
+            if scipy != None and return_scipy:
+                nz = 0
+                for e in features.split():
+                    ind, val = e.split(":")
+                    if ind == '0':
+                        indx_start = 0
+                    val = float(val)
+                    if val != 0:
+                        col_idx.append(int(ind)-indx_start)
+                        prob_x.append(val)
+                        nz += 1
+                row_ptr.append(row_ptr[-1]+nz)
+            else:
+                xi = {}
+                for e in features.split():
+                    ind, val = e.split(":")
+                    xi[int(ind)] = float(val)
+                prob_x += [xi]
+    except Exception as err_msg:
+        raise err_msg
+    finally:
+        if not hasattr(data_source, "read"):
+            # close file only if it was created by us
+            file.close()
+
     if scipy != None and return_scipy:
         prob_y = np.frombuffer(prob_y, dtype='d')
         prob_x = np.frombuffer(prob_x, dtype='d')
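
The commonutil.py change above lets svm_read_problem take either a path or an
already-open file-like object. A quick sketch using an in-memory buffer (the
feature values are made up):

    import io
    from libsvm.svmutil import svm_read_problem

    # Any object with a read/iteration interface is accepted now, not only a
    # file name; the caller keeps ownership of the handle, so it is not closed.
    data = io.StringIO("1 1:0.5 3:1.2\n-1 2:0.7\n")
    y, x = svm_read_problem(data)
    print(y)  # [1.0, -1.0]
    print(x)  # [{1: 0.5, 3: 1.2}, {2: 0.7}]
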
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/python/libsvm/svm.py 
new/libsvm-3.36/python/libsvm/svm.py
--- old/libsvm-3.3/python/libsvm/svm.py 2022-08-10 16:06:27.000000000 +0200
+++ new/libsvm-3.36/python/libsvm/svm.py        2025-05-12 06:29:51.000000000 
+0200
@@ -1,9 +1,8 @@
-#!/usr/bin/env python
-
 from ctypes import *
 from ctypes.util import find_library
 from os import path
 from glob import glob
+from enum import IntEnum
 import sys
 
 try:
@@ -19,10 +18,8 @@
     from itertools import izip as zip
 
 __all__ = ['libsvm', 'svm_problem', 'svm_parameter',
-           'toPyModel', 'gen_svm_nodearray', 'print_null', 'svm_node', 'C_SVC',
-           'EPSILON_SVR', 'LINEAR', 'NU_SVC', 'NU_SVR', 'ONE_CLASS',
-           'POLY', 'PRECOMPUTED', 'PRINT_STRING_FUN', 'RBF',
-           'SIGMOID', 'c_double', 'svm_model']
+           'toPyModel', 'gen_svm_nodearray', 'print_null', 'svm_node', 
'svm_forms',
+            'PRINT_STRING_FUN', 'kernel_names', 'c_double', 'svm_model']
 
 try:
     dirname = path.dirname(path.abspath(__file__))
@@ -34,7 +31,7 @@
         if sys.platform == 'win32':
             libsvm = CDLL(path.join(dirname, r'..\..\windows\libsvm.dll'))
         else:
-            libsvm = CDLL(path.join(dirname, '../../libsvm.so.3'))
+            libsvm = CDLL(path.join(dirname, '../../libsvm.so.4'))
     except:
     # For unix the prefix 'lib' is not considered.
         if find_library('svm'):
@@ -44,22 +41,32 @@
         else:
             raise Exception('LIBSVM library not found.')
 
-C_SVC = 0
-NU_SVC = 1
-ONE_CLASS = 2
-EPSILON_SVR = 3
-NU_SVR = 4
-
-LINEAR = 0
-POLY = 1
-RBF = 2
-SIGMOID = 3
-PRECOMPUTED = 4
+class svm_forms(IntEnum):
+    C_SVC = 0
+    NU_SVC = 1
+    ONE_CLASS = 2
+    EPSILON_SVR = 3
+    NU_SVR = 4
+
+class kernel_names(IntEnum):
+    LINEAR = 0
+    POLY = 1
+    RBF = 2
+    SIGMOID = 3
+    PRECOMPUTED = 4
 
 PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
 def print_null(s):
     return
 
+# In multi-threading, all threads share the same memory space of
+# the dynamic library (libsvm). Thus, we use a module-level
+# variable to keep a reference to ctypes print_null, preventing
+# python from garbage collecting it in thread B while thread A
+# still needs it. Check the usage of svm_set_print_string_function()
+# in LIBSVM README for details.
+ctypes_print_null = PRINT_STRING_FUN(print_null)
+
 def genFields(names, types):
     return list(zip(names, types))
 
@@ -101,7 +108,7 @@
             index_range = index_range[np.where(index_range <= feature_max)]
     elif isinstance(xi, (dict, list, tuple)):
         if isinstance(xi, dict):
-            index_range = xi.keys()
+            index_range = sorted(xi.keys())
         elif isinstance(xi, (list, tuple)):
             if not isKernel:
                 xi_shift = 1
@@ -110,11 +117,9 @@
                 index_range = range(0, len(xi)) # index starts from 0 for 
precomputed kernel
 
         if feature_max:
-            index_range = filter(lambda j: j <= feature_max, index_range)
+            index_range = list(filter(lambda j: j <= feature_max, index_range))
         if not isKernel:
-            index_range = filter(lambda j:xi[j-xi_shift] != 0, index_range)
-
-        index_range = sorted(index_range)
+            index_range = list(filter(lambda j:xi[j-xi_shift] != 0, 
index_range))
     else:
         raise TypeError('xi should be a dictionary, list, tuple, 1-d numpy 
array, or tuple of (index, data)')
 
@@ -123,9 +128,10 @@
 
     if scipy and isinstance(xi, tuple) and len(xi) == 2\
             and isinstance(xi[0], np.ndarray) and isinstance(xi[1], 
np.ndarray): # for a sparse vector
-        for idx, j in enumerate(index_range):
-            ret[idx].index = j
-            ret[idx].value = (xi[1])[idx]
+        # since xi=(indices, values), we must sort them simultaneously.
+        for idx, arg in enumerate(np.argsort(index_range)):
+            ret[idx].index = index_range[arg]
+            ret[idx].value = (xi[1])[arg]
     else:
         for idx, j in enumerate(index_range):
             ret[idx].index = j
@@ -140,10 +146,18 @@
     from numba import jit
     jit_enabled = True
 except:
-    jit = lambda x: x
+    # We need to support two cases: when jit is called with no arguments, and 
when jit is called with
+    # a keyword argument.
+    def jit(func=None, *args, **kwargs):
+        if func is None:
+            # This handles the case where jit is used with parentheses: 
@jit(nopython=True)
+            return lambda x: x
+        else:
+            # This handles the case where jit is used without parentheses: @jit
+            return func
     jit_enabled = False
 
-@jit
+@jit(nopython=True)
 def csr_to_problem_jit(l, x_val, x_ind, x_rowptr, prob_val, prob_ind, 
prob_rowptr, indx_start):
     for i in range(l):
         b1,e1 = x_rowptr[i], x_rowptr[i+1]
@@ -164,7 +178,9 @@
 
     # Extra space for termination node and (possibly) bias term
     x_space = prob.x_space = np.empty((x.nnz+x.shape[0]), dtype=svm_node)
-    prob.rowptr = x.indptr.copy()
+    # rowptr has to be a 64bit integer because it will later be used for 
pointer arithmetic,
+    # which overflows when the added pointer points to an address that is 
numerically high.
+    prob.rowptr = x.indptr.astype(np.int64, copy=True)
     prob.rowptr[1:] += np.arange(1,x.shape[0]+1)
     prob_ind = x_space["index"]
     prob_val = x_space["value"]
@@ -254,8 +270,8 @@
         return s
 
     def set_to_default_values(self):
-        self.svm_type = C_SVC;
-        self.kernel_type = RBF
+        self.svm_type = svm_forms.C_SVC;
+        self.kernel_type = kernel_names.RBF
         self.degree = 3
         self.gamma = 0
         self.coef0 = 0
@@ -289,10 +305,10 @@
         while i < len(argv):
             if argv[i] == "-s":
                 i = i + 1
-                self.svm_type = int(argv[i])
+                self.svm_type = svm_forms(int(argv[i]))
             elif argv[i] == "-t":
                 i = i + 1
-                self.kernel_type = int(argv[i])
+                self.kernel_type = kernel_names(int(argv[i]))
             elif argv[i] == "-d":
                 i = i + 1
                 self.degree = int(argv[i])
@@ -324,7 +340,7 @@
                 i = i + 1
                 self.probability = int(argv[i])
             elif argv[i] == "-q":
-                self.print_func = PRINT_STRING_FUN(print_null)
+                self.print_func = ctypes_print_null
             elif argv[i] == "-v":
                 i = i + 1
                 self.cross_validation = 1
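
Since the module-level constants in svm.py are now IntEnum members (svm_forms,
kernel_names), they still compare equal to the raw integers parsed from option
strings. A short sketch of what that looks like from user code:

    from libsvm.svm import svm_parameter, svm_forms, kernel_names

    # "-s 0 -t 2" selects C_SVC with the RBF kernel; "-q" now points
    # print_func at the module-level ctypes_print_null wrapper.
    param = svm_parameter("-s 0 -t 2 -q")
    assert param.svm_type == svm_forms.C_SVC      # IntEnum, equal to 0
    assert param.kernel_type == kernel_names.RBF  # IntEnum, equal to 2
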
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/python/libsvm/svmutil.py 
new/libsvm-3.36/python/libsvm/svmutil.py
--- old/libsvm-3.3/python/libsvm/svmutil.py     2022-08-10 16:06:27.000000000 
+0200
+++ new/libsvm-3.36/python/libsvm/svmutil.py    2025-05-12 06:29:49.000000000 
+0200
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 import os, sys
 from .svm import *
 from .svm import __all__ as svm_all
@@ -95,7 +93,7 @@
         assert isinstance(arg2, (list, tuple)) or (scipy and isinstance(arg2, 
(np.ndarray, sparse.spmatrix)))
         y, x, options = arg1, arg2, arg3
         param = svm_parameter(options)
-        prob = svm_problem(y, x, isKernel=(param.kernel_type == PRECOMPUTED))
+        prob = svm_problem(y, x, isKernel=(param.kernel_type == 
kernel_names.PRECOMPUTED))
     elif isinstance(arg1, svm_problem):
         prob = arg1
         if isinstance(arg2, svm_parameter):
@@ -105,7 +103,7 @@
     if prob == None or param == None:
         raise TypeError("Wrong types for the arguments")
 
-    if param.kernel_type == PRECOMPUTED:
+    if param.kernel_type == kernel_names.PRECOMPUTED:
         for i in range(prob.l):
             xi = prob.x[i]
             idx, val = xi[0].index, xi[0].value
@@ -126,7 +124,7 @@
         target = (c_double * l)()
         libsvm.svm_cross_validation(prob, param, nr_fold, target)
         ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
-        if param.svm_type in [EPSILON_SVR, NU_SVR]:
+        if param.svm_type in [svm_forms.EPSILON_SVR, svm_forms.NU_SVR]:
             print("Cross Validation Mean squared error = %g" % MSE)
             print("Cross Validation Squared correlation coefficient = %g" % 
SCC)
             return MSE
@@ -214,7 +212,7 @@
         if not is_prob_model:
             raise ValueError("Model does not support probabiliy estimates")
 
-        if svm_type in [NU_SVR, EPSILON_SVR]:
+        if svm_type in [svm_forms.NU_SVR, svm_forms.EPSILON_SVR]:
             info("Prob. model for test data: target value = predicted value + 
z,\n"
             "z: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g" % 
m.get_svr_probability());
             nr_class = 0
@@ -223,9 +221,9 @@
         for i in range(nr_instance):
             if scipy and isinstance(x, sparse.spmatrix):
                 indslice = slice(x.indptr[i], x.indptr[i+1])
-                xi, idx = gen_svm_nodearray((x.indices[indslice], 
x.data[indslice]), isKernel=(m.param.kernel_type == PRECOMPUTED))
+                xi, idx = gen_svm_nodearray((x.indices[indslice], 
x.data[indslice]), isKernel=(m.param.kernel_type == kernel_names.PRECOMPUTED))
             else:
-                xi, idx = gen_svm_nodearray(x[i], 
isKernel=(m.param.kernel_type == PRECOMPUTED))
+                xi, idx = gen_svm_nodearray(x[i], 
isKernel=(m.param.kernel_type == kernel_names.PRECOMPUTED))
             label = libsvm.svm_predict_probability(m, xi, prob_estimates)
             values = prob_estimates[:nr_class]
             pred_labels += [label]
@@ -233,7 +231,7 @@
     else:
         if is_prob_model:
             info("Model supports probability estimates, but disabled in 
predicton.")
-        if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVC):
+        if svm_type in [svm_forms.ONE_CLASS, svm_forms.EPSILON_SVR, 
svm_forms.NU_SVC]:
             nr_classifier = 1
         else:
             nr_classifier = nr_class*(nr_class-1)//2
@@ -241,9 +239,9 @@
         for i in range(nr_instance):
             if scipy and isinstance(x, sparse.spmatrix):
                 indslice = slice(x.indptr[i], x.indptr[i+1])
-                xi, idx = gen_svm_nodearray((x.indices[indslice], 
x.data[indslice]), isKernel=(m.param.kernel_type == PRECOMPUTED))
+                xi, idx = gen_svm_nodearray((x.indices[indslice], 
x.data[indslice]), isKernel=(m.param.kernel_type == kernel_names.PRECOMPUTED))
             else:
-                xi, idx = gen_svm_nodearray(x[i], 
isKernel=(m.param.kernel_type == PRECOMPUTED))
+                xi, idx = gen_svm_nodearray(x[i], 
isKernel=(m.param.kernel_type == kernel_names.PRECOMPUTED))
             label = libsvm.svm_predict_values(m, xi, dec_values)
             if(nr_class == 1):
                 values = [1]
@@ -256,7 +254,7 @@
         y = [0] * nr_instance
     ACC, MSE, SCC = evaluations(y, pred_labels)
 
-    if svm_type in [EPSILON_SVR, NU_SVR]:
+    if svm_type in [svm_forms.EPSILON_SVR, svm_forms.NU_SVR]:
         info("Mean squared error = %g (regression)" % MSE)
         info("Squared correlation coefficient = %g (regression)" % SCC)
     else:
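
The headline feature from the changelog, probabilistic outputs for one-class
SVM, is reachable through the updated Python wrapper. A hedged sketch (the data
file is illustrative; with -b 1 the returned values are the estimated
probabilities of each point being a normal instance rather than an outlier):

    from libsvm.svmutil import svm_read_problem, svm_train, svm_predict

    y, x = svm_read_problem("heart_scale")
    # One-class SVM (-s 2) trained with probability information (-b 1);
    # labels are ignored when training a one-class model.
    m = svm_train(y, x, "-s 2 -n 0.1 -b 1")
    # Probability estimates at prediction time also need -b 1.
    p_labels, p_acc, p_vals = svm_predict(y, x, m, "-b 1")
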
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/python/setup.py 
new/libsvm-3.36/python/setup.py
--- old/libsvm-3.3/python/setup.py      2022-08-10 16:06:27.000000000 +0200
+++ new/libsvm-3.36/python/setup.py     2025-05-12 06:29:49.000000000 +0200
@@ -16,7 +16,7 @@
 
 PACKAGE_DIR = "libsvm"
 PACKAGE_NAME = "libsvm-official"
-VERSION = "3.30.0"
+VERSION = "3.36.0"
 cpp_dir = "cpp-source"
 # should be consistent with dynamic_lib_name in libsvm/svm.py
 dynamic_lib_name = "clib"
@@ -30,6 +30,11 @@
     "svm.def",
 ]
 
+# license parameters
+license_source = path.join("..", "COPYRIGHT")
+license_file = "LICENSE"
+license_name = "BSD-3-Clause"
+
 kwargs_for_extension = {
     "sources": [path.join(cpp_dir, f) for f in source_codes],
     "depends": [path.join(cpp_dir, f) for f in headers],
@@ -37,12 +42,20 @@
     "language": "c++",
 }
 
-# see ../Makefile.win
+# see ../Makefile.win and enable openmp
 if sys.platform == "win32":
     kwargs_for_extension.update(
         {
             "define_macros": [("_WIN64", ""), ("_CRT_SECURE_NO_DEPRECATE", 
"")],
-            "extra_link_args": ["-DEF:{}\svm.def".format(cpp_dir)],
+            "extra_link_args": [r"-DEF:{}\svm.def".format(cpp_dir)],
+            "extra_compile_args": ["/openmp"],
+        }
+    )
+else:
+    kwargs_for_extension.update(
+        {
+            "extra_compile_args": ["-fopenmp"],
+            "extra_link_args": ["-fopenmp"],
         }
     )
 
@@ -59,7 +72,7 @@
 class CleanCommand(clean_cmd):
     def run(self):
         clean_cmd.run(self)
-        to_be_removed = ["build/", "dist/", "MANIFEST", cpp_dir, 
"{}.egg-info".format(PACKAGE_NAME)]
+        to_be_removed = ["build/", "dist/", "MANIFEST", cpp_dir, 
"{}.egg-info".format(PACKAGE_NAME), license_file]
         to_be_removed += glob("./{}/{}.*".format(PACKAGE_DIR, 
dynamic_lib_name))
         for root, dirs, files in os.walk(os.curdir, topdown=False):
             if "__pycache__" in dirs:
@@ -79,6 +92,9 @@
     if not path.exists(cpp_dir):
         create_cpp_source()
 
+    if not path.exists(license_file):
+        copyfile(license_source, license_file)
+
     with open("README") as f:
         long_description = f.read()
 
@@ -92,6 +108,7 @@
         author="ML group @ National Taiwan University",
         author_email="[email protected]",
         url="https://www.csie.ntu.edu.tw/~cjlin/libsvm";,
+        license=license_name,
         install_requires=["scipy"],
         ext_modules=[
             Extension(
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/svm.cpp new/libsvm-3.36/svm.cpp
--- old/libsvm-3.3/svm.cpp      2022-08-10 16:06:29.000000000 +0200
+++ new/libsvm-3.36/svm.cpp     2025-05-12 06:29:50.000000000 +0200
@@ -8,6 +8,10 @@
 #include <limits.h>
 #include <locale.h>
 #include "svm.h"
+#ifdef _OPENMP
+#include <omp.h>
+#endif
+
 int libsvm_version = LIBSVM_VERSION;
 typedef float Qfloat;
 typedef signed char schar;
@@ -50,7 +54,7 @@
        char buf[BUFSIZ];
        va_list ap;
        va_start(ap,fmt);
-       vsprintf(buf,fmt,ap);
+       vsnprintf(buf,BUFSIZ,fmt,ap);
        va_end(ap);
        (*svm_print_string)(buf);
 }
@@ -67,7 +71,7 @@
 class Cache
 {
 public:
-       Cache(int l,long int size);
+       Cache(int l,size_t size);
        ~Cache();
 
        // request data [0,len)
@@ -77,7 +81,7 @@
        void swap_index(int i, int j);
 private:
        int l;
-       long int size;
+       size_t size;
        struct head_t
        {
                head_t *prev, *next;    // a circular list
@@ -91,12 +95,12 @@
        void lru_insert(head_t *h);
 };
 
-Cache::Cache(int l_,long int size_):l(l_),size(size_)
+Cache::Cache(int l_,size_t size_):l(l_),size(size_)
 {
        head = (head_t *)calloc(l,sizeof(head_t));      // initialized to 0
        size /= sizeof(Qfloat);
-       size -= l * sizeof(head_t) / sizeof(Qfloat);
-       size = max(size, 2 * (long int) l);     // cache must be large enough 
for two columns
+       size_t header_size = l * sizeof(head_t) / sizeof(Qfloat);
+       size = max(size, 2 * (size_t) l + header_size) - header_size;  // cache 
must be large enough for two columns
        lru_head.next = lru_head.prev = &lru_head;
 }
 
@@ -132,7 +136,7 @@
        if(more > 0)
        {
                // free old space
-               while(size < more)
+               while(size < (size_t)more)
                {
                        head_t *old = lru_head.next;
                        lru_delete(old);
@@ -144,7 +148,7 @@
 
                // allocate new space
                h->data = (Qfloat *)realloc(h->data,sizeof(Qfloat)*len);
-               size -= more;
+               size -= more;  // previous while loop guarantees size >= more 
and subtraction of size_t variable will not underflow
                swap(h->len,len);
        }
 
@@ -1270,7 +1274,7 @@
        :Kernel(prob.l, prob.x, param)
        {
                clone(y,y_,prob.l);
-               cache = new Cache(prob.l,(long int)(param.cache_size*(1<<20)));
+               cache = new Cache(prob.l,(size_t)(param.cache_size*(1<<20)));
                QD = new double[prob.l];
                for(int i=0;i<prob.l;i++)
                        QD[i] = (this->*kernel_function)(i,i);
@@ -1282,6 +1286,9 @@
                int start, j;
                if((start = cache->get_data(i,&data,len)) < len)
                {
+#ifdef _OPENMP
+#pragma omp parallel for private(j) schedule(guided)
+#endif
                        for(j=start;j<len;j++)
                                data[j] = 
(Qfloat)(y[i]*y[j]*(this->*kernel_function)(i,j));
                }
@@ -1319,7 +1326,7 @@
        ONE_CLASS_Q(const svm_problem& prob, const svm_parameter& param)
        :Kernel(prob.l, prob.x, param)
        {
-               cache = new Cache(prob.l,(long int)(param.cache_size*(1<<20)));
+               cache = new Cache(prob.l,(size_t)(param.cache_size*(1<<20)));
                QD = new double[prob.l];
                for(int i=0;i<prob.l;i++)
                        QD[i] = (this->*kernel_function)(i,i);
@@ -1366,7 +1373,7 @@
        :Kernel(prob.l, prob.x, param)
        {
                l = prob.l;
-               cache = new Cache(l,(long int)(param.cache_size*(1<<20)));
+               cache = new Cache(l,(size_t)(param.cache_size*(1<<20)));
                QD = new double[2*l];
                sign = new schar[2*l];
                index = new int[2*l];
@@ -1397,6 +1404,9 @@
                int j, real_i = index[i];
                if(cache->get_data(real_i,&data,l) < l)
                {
+#ifdef _OPENMP
+#pragma omp parallel for private(j) schedule(guided)
+#endif
                        for(j=0;j<l;j++)
                                data[j] = 
(Qfloat)(this->*kernel_function)(real_i,j);
                }
@@ -2219,11 +2229,9 @@
                        double *prob_density_marks = Malloc(double,nr_marks);
 
                        
if(svm_one_class_probability(prob,model,prob_density_marks) == 0)
-                       {
-                               model->prob_density_marks = 
Malloc(double,nr_marks);
-                               for(i=0;i<nr_marks;i++)
-                                       model->prob_density_marks[i] = 
prob_density_marks[i];
-                       }
+                               model->prob_density_marks = prob_density_marks;
+                       else
+                               free(prob_density_marks);
                }
 
                free(f.alpha);
@@ -2436,8 +2444,8 @@
        int nr_class;
        if (nr_fold > l)
        {
+               fprintf(stderr,"WARNING: # folds (%d) > # data (%d). Will use # 
folds = # data instead (i.e., leave-one-out cross validation)\n", nr_fold, l);
                nr_fold = l;
-               fprintf(stderr,"WARNING: # folds > # data. Will use # folds = # 
data instead (i.e., leave-one-out cross validation)\n");
        }
        fold_start = Malloc(int,nr_fold+1);
        // stratified cv may not give leave-one-out rate
@@ -2598,6 +2606,9 @@
        {
                double *sv_coef = model->sv_coef[0];
                double sum = 0;
+#ifdef _OPENMP
+#pragma omp parallel for private(i) reduction(+:sum) schedule(guided)
+#endif
                for(i=0;i<model->l;i++)
                        sum += sv_coef[i] * 
Kernel::k_function(x,model->SV[i],model->param);
                sum -= model->rho[0];
@@ -2614,6 +2625,9 @@
                int l = model->l;
 
                double *kvalue = Malloc(double,l);
+#ifdef _OPENMP
+#pragma omp parallel for private(i) schedule(guided)
+#endif
                for(i=0;i<l;i++)
                        kvalue[i] = 
Kernel::k_function(x,model->SV[i],model->param);
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/svm.h new/libsvm-3.36/svm.h
--- old/libsvm-3.3/svm.h        2022-08-10 16:06:29.000000000 +0200
+++ new/libsvm-3.36/svm.h       2025-05-12 06:29:50.000000000 +0200
@@ -1,7 +1,7 @@
 #ifndef _LIBSVM_H
 #define _LIBSVM_H
 
-#define LIBSVM_VERSION 330
+#define LIBSVM_VERSION 336
 
 #ifdef __cplusplus
 extern "C" {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/libsvm-3.3/tools/grid.py 
new/libsvm-3.36/tools/grid.py
--- old/libsvm-3.3/tools/grid.py        2022-08-10 16:06:30.000000000 +0200
+++ new/libsvm-3.36/tools/grid.py       2025-05-12 06:29:51.000000000 +0200
@@ -283,7 +283,7 @@
         if options.grid_with_g:
             cmdline += ' -g {0} '.format(g)
         cmdline += ' -v {0} {1} {2} '.format\
-            (options.fold,options.pass_through_string,options.dataset_pathname)
+            (options.fold,options.pass_through_string,'"' + 
options.dataset_pathname + '"')
         return cmdline
 
 class LocalWorker(Worker):
Binary files old/libsvm-3.3/windows/libsvm.dll and 
new/libsvm-3.36/windows/libsvm.dll differ
Binary files old/libsvm-3.3/windows/libsvmread.mexw64 and 
new/libsvm-3.36/windows/libsvmread.mexw64 differ
Binary files old/libsvm-3.3/windows/libsvmwrite.mexw64 and 
new/libsvm-3.36/windows/libsvmwrite.mexw64 differ
Binary files old/libsvm-3.3/windows/svm-predict.exe and 
new/libsvm-3.36/windows/svm-predict.exe differ
Binary files old/libsvm-3.3/windows/svm-scale.exe and 
new/libsvm-3.36/windows/svm-scale.exe differ
Binary files old/libsvm-3.3/windows/svm-toy.exe and 
new/libsvm-3.36/windows/svm-toy.exe differ
Binary files old/libsvm-3.3/windows/svm-train.exe and 
new/libsvm-3.36/windows/svm-train.exe differ
Binary files old/libsvm-3.3/windows/svmpredict.mexw64 and 
new/libsvm-3.36/windows/svmpredict.mexw64 differ
Binary files old/libsvm-3.3/windows/svmtrain.mexw64 and 
new/libsvm-3.36/windows/svmtrain.mexw64 differ
