Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package llamacpp for openSUSE:Factory 
checked in at 2025-04-20 09:36:21
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/llamacpp (Old)
 and      /work/SRC/openSUSE:Factory/.llamacpp.new.30101 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "llamacpp"

Sun Apr 20 09:36:21 2025 rev:6 rq:1271007 version:5158

Changes:
--------
--- /work/SRC/openSUSE:Factory/llamacpp/llamacpp.changes        2025-03-17 22:21:21.143900891 +0100
+++ /work/SRC/openSUSE:Factory/.llamacpp.new.30101/llamacpp.changes     2025-04-20 20:02:18.554778936 +0200
@@ -1,0 +2,51 @@
+Sat Apr 19 21:35:38 UTC 2025 - Eyad Issa <eyadlore...@gmail.com>
+
+- Remove convert_hf_to_gguf.py
+
+- Update to version 5158:
+    * Added support for new models:
+        ~ Llama 4 text-only
+        ~ IBM Granite 3.3 FIM tokens
+        ~ Qwen3 and Qwen3MoE
+        ~ BailingMoE (Ling)
+        ~ Trillion 7B model
+        ~ PLM GGUF Conversion & Inference
+        ~ RWKV v7 architecture
+        ~ GPT2, Bloom and CodeShell tied word embeddings
+        ~ EXAONE tied word embeddings
+        ~ DeepSeek V2/V3 MLA implementation
+        ~ Gemma 3 fixes and improvements
+    * Improved hardware acceleration support:
+        ~ Vulkan: Multiple optimizations for flash attention,
+          coopmat2, and shader performance
+        ~ OpenCL: Fixed profiling, improved Adreno GPU
+          identification, added multi and vision rope
+    * Performance optimizations:
+        ~ AVX512 implementation of GEMM for Q4_Kx8
+        ~ Faster SSM scan
+        ~ Block interleaving support for Q4_K quantization
+          on x86 AVX2
+        ~ PowerPC-specific optimizations
+    * Infrastructure improvements:
+        ~ Added ability to lazy-load safetensors remotely
+          without downloading
+        ~ Refactored downloading system to handle mmproj
+          with -hf option
+        ~ Added support for custom HF endpoint
+        ~ Added RPC backend with added commands
+        ~ Improved server with support for listening on unix
+          sockets
+    * Added image processing capabilities:
+        ~ Introduced libmtmd for image token handling
+        ~ Added image_manipulation and llava_uhd classes
+        ~ Fixed CPU-only CLIP image encoding
+        ~ Fixed clip loading GGUFs with missing description
+    * Bug fixes:
+        ~ Fixed compilation issues on various platforms
+          (s390x, POWER9, AIX, FreeBSD)
+        ~ Fixed memory leaks and allocation issues
+        ~ Fixed Ctrl+D/newline handling
+        ~ Fixed thread joining on server exit
+        ~ Fixed various backend-specific bugs
+
+-------------------------------------------------------------------
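
As background for the server-related entries in the changelog above: llama-server keeps serving its HTTP API over a host/port pair, with unix-socket listening added as an alternative in this release. A minimal usage sketch follows; the model path is illustrative and the exact unix-socket syntax should be checked against llama-server --help:

  # start the packaged server on a local TCP port (model path is an example)
  llama-server -m /path/to/model.gguf --host 127.0.0.1 --port 8080

  # query the completion endpoint
  curl -s http://127.0.0.1:8080/completion \
       -H "Content-Type: application/json" \
       -d '{"prompt": "Hello", "n_predict": 32}'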

Old:
----
  llamacpp-4889.obscpio

New:
----
  llamacpp-5158.obscpio

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ llamacpp.spec ++++++
--- /var/tmp/diff_new_pack.dhBw9F/_old  2025-04-20 20:02:19.430815512 +0200
+++ /var/tmp/diff_new_pack.dhBw9F/_new  2025-04-20 20:02:19.434815679 +0200
@@ -17,7 +17,7 @@
 
 
 Name:           llamacpp
-Version:        4889
+Version:        5158
 Release:        0
 Summary:        llama-cli tool to run inference using the llama.cpp library
 License:        MIT
@@ -141,16 +141,13 @@
 
 # used for shader compilation only
 rm %{buildroot}%{_bindir}/vulkan-shaders-gen
-
-# remove .py extension
-mv %{buildroot}%{_bindir}/convert_hf_to_gguf.py %{buildroot}%{_bindir}/convert_hf_to_gguf
+# remove dev scripts
+rm %{buildroot}%{_bindir}/convert_hf_to_gguf.py
 
 %files
 %doc README.md
 %license LICENSE
 
-%{_bindir}/convert_hf_to_gguf
-
 %{_bindir}/llama-cli
 %{_bindir}/llama-server
 %{_bindir}/llama-bench
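
Since convert_hf_to_gguf.py is no longer installed by the package (see the %install and %files changes above), Hugging Face to GGUF conversion now has to be done with the script from an upstream llama.cpp checkout. A minimal sketch, with illustrative paths; flags follow the upstream script's usual interface and should be verified with --help:

  # from a llama.cpp source checkout; paths are examples
  python3 convert_hf_to_gguf.py /path/to/hf-model \
      --outfile /path/to/model-f16.gguf --outtype f16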

++++++ _service ++++++
--- /var/tmp/diff_new_pack.dhBw9F/_old  2025-04-20 20:02:19.490818017 +0200
+++ /var/tmp/diff_new_pack.dhBw9F/_new  2025-04-20 20:02:19.490818017 +0200
@@ -2,9 +2,9 @@
   <service name="format_spec_file" mode="manual" />
   <service name="obs_scm" mode="manual">
     <param name="filename">llamacpp</param>
-    <param name="url">https://github.com/ggerganov/llama.cpp.git</param>
+    <param name="url">https://github.com/ggml-org/llama.cpp.git</param>
     <param name="scm">git</param>
-    <param name="revision">b4889</param>
+    <param name="revision">b5158</param>
     <param name="versionformat">@PARENT_TAG@</param>
     <param name="versionrewrite-pattern">b(.*)</param>
     <param name="changesgenerate">enable</param>

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.dhBw9F/_old  2025-04-20 20:02:19.514819020 +0200
+++ /var/tmp/diff_new_pack.dhBw9F/_new  2025-04-20 20:02:19.514819020 +0200
@@ -1,6 +1,8 @@
 <servicedata>
 <service name="tar_scm">
                <param name="url">https://github.com/ggerganov/llama.cpp.git</param>
-              <param name="changesrevision">9f2250ba722738ec0e6ab684636268a79160c854</param></service></servicedata>
+              <param name="changesrevision">9f2250ba722738ec0e6ab684636268a79160c854</param></service><service name="tar_scm">
+                <param name="url">https://github.com/ggml-org/llama.cpp.git</param>
+              <param name="changesrevision">00137157fca3d17b90380762b4d7cc158d385bd3</param></service></servicedata>
 (No newline at EOF)
 

++++++ llamacpp-4889.obscpio -> llamacpp-5158.obscpio ++++++
++++ 96493 lines of diff (skipped)

++++++ llamacpp.obsinfo ++++++
--- /var/tmp/diff_new_pack.dhBw9F/_old  2025-04-20 20:02:24.299018767 +0200
+++ /var/tmp/diff_new_pack.dhBw9F/_new  2025-04-20 20:02:24.331020103 +0200
@@ -1,5 +1,5 @@
 name: llamacpp
-version: 4889
-mtime: 1741970480
-commit: 9f2250ba722738ec0e6ab684636268a79160c854
+version: 5158
+mtime: 1745078703
+commit: 00137157fca3d17b90380762b4d7cc158d385bd3
 
