commit:     d8c38483068a6c6fddff7436972afcc4b9948587
Author:     Sergey Alirzaev <l29ah@riseup.net>
AuthorDate: Tue Feb  3 23:02:28 2026 +0000
Commit:     Sergey Alirzaev <zl29ah@gmail.com>
CommitDate: Tue Feb  3 23:02:28 2026 +0000
URL:        https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=d8c38483

sci-misc/llama-cpp: add 0_pre7924, drop 0_pre6318

bump for ngram hasher support

Signed-off-by: Sergey Alirzaev <l29ah@riseup.net>

 sci-misc/llama-cpp/Manifest                        |  2 +-
 ...0_pre6318.ebuild => llama-cpp-0_pre7924.ebuild} | 81 ++++++++++++++++------
 2 files changed, 60 insertions(+), 23 deletions(-)

diff --git a/sci-misc/llama-cpp/Manifest b/sci-misc/llama-cpp/Manifest
index 5c86cf9090..1659db8229 100644
--- a/sci-misc/llama-cpp/Manifest
+++ b/sci-misc/llama-cpp/Manifest
@@ -1,8 +1,8 @@
 DIST ggml-org_models_tinyllamas_stories15M-q4_0-99dd1a73db5a37100bd4ae633f4cfce6560e1567.gguf 19077344 BLAKE2B 16e65adf9785e3091c51f1de59e5580f93fb47f79961513aeb3dbb8a0f5930f7120f0304f0f293a006170805e2b70ee1fcff0496b63356323d32c2caa55be8a8 SHA512 f9944886089958e0d97b1906cfd45020e0821c65429346e76fae29136c634ae5d039dffbae5933a95b0674f4acd87b656feb9f9e1b16dd434c5c9b5886f4f617
-DIST llama-cpp-0_pre6318.tar.gz 25626090 BLAKE2B b95826a5fd4ab27927d390cdc091648d1ffe281d5d9946fdfa4e6c8c59fb7461dd1e2b83751c86c575b4f00207bbd0cfbe467a0ae9dfdb3b192356bc77e0f808 SHA512 f3b5655123919a76fa27f1be05ffb2a7f681d7793d4d9e24106739a21846a2918ffdf9ef326ac99a55f6b4943059e4f76de754da894ff6fdd7e2d56a41edc56b
 DIST llama-cpp-0_pre6710.tar.gz 25894417 BLAKE2B 147f30d76fd49bf18fa0ab9e3e75d0ad337dcd87a73f1dbce43f180488ea06b40b1a2a93b4686a88b5a442dd4dd6a8e45bf848ceb549bdc0ad0078427336c56e SHA512 75c5918713256cb11f704b94d6e249a9f3ac2dde1107a6f4506134ba9c772e1c42d991915b571887207003f4b0679a183cd0787ffd742a08d2283fdfb86695eb
 DIST llama-cpp-0_pre6980.tar.gz 26431911 BLAKE2B b7d7c0dcdabde01acb816e73bc344564823dd1fc498fb98bf3c611b2d7a964af4d94f7cad533fe675a30685d510829160e392ab0f3bd16f4757a2f3446b8e3ca SHA512 33e63336ad7c0fc653acd409d9314ce3fc3755ed1c03b4806c647b7c80d91b3c883aec6633334555c3855a24276d4975a54c96af91df8d2f818d4dd1dbcbabfb
 DIST llama-cpp-0_pre7276.tar.gz 27765814 BLAKE2B d0553ab1dd29c9d93a18c6217aab4553faf09e385a94b90732a537bbcf9bded54d5cda28553543e2c0cc71b6a157bfb80a48405f3f8281c51525757967b33e16 SHA512 3035fe53fea2ca3b0f35e479f4eaec75e38a2ea670600445776cd6fa696fc83ca19eb6dd7cd2ab1da69e78293c62318b5182e6e5b3423ae6c1f00854c5132a4c
 DIST llama-cpp-0_pre7611.tar.gz 28622786 BLAKE2B 3c345645c9bcf07d8a513b9e883619b31b5254581f73429d638403758429fd2dfc5f78a22d538e8d88eb6c1be74bf805481af697480727ed750492ddec5c37fe SHA512 c6c4780d7e68adfc385b57c6f7530423f8205bfa283572b0d414d55e143c03307e98676e41ad527c37d7837f831f8ff24be0f7bf59e366ea82f3802cdc946821
 DIST llama-cpp-0_pre7770.tar.gz 28797089 BLAKE2B 0ad614f16c19ff1339571dd90be566ff4ccedfd991dfeb948f0cedc54f8447a3e72e35ac392bb60d5bc8a44d1757be3bea0123eac02b694f5f0a8f2c2b941b6a SHA512 78896fdcf05330bb4b1fd86a985da56882d0166c6f276afda273dca183e4c7365decd5e9630c8d633e065162cbb06d6d8e4fdcf76be9768b238ee5053abaa3aa
 DIST llama-cpp-0_pre7836.tar.gz 28813563 BLAKE2B c843c0199b528114c23c58536552e2a7f581fda93364353e8aa38fda99ddb1942c421ad00be4daf86b49a2c311ef89bb6908b8900ca1ffb64df338cbfa11c354 SHA512 161176107de175d9b6fdc1ebfa9fe0cf6fd6968245730f7df7f17c6a179ef111685d0fed5a45031825dc4b07db270887a4307061f9918e375a1c65f261062d9d
+DIST llama-cpp-0_pre7924.tar.gz 28899921 BLAKE2B b89c8c170d1c2d52390dfef35a4e0857b4a3aa174077f2fdfa0fbbe1a254c5e6ae9b976bde9f52e42ed3932403aec728b9c415ca44ccd22c061e7e8a1e481526 SHA512 689c73215e795a53be1d0e6a639983aded1ed7bdeb3a261556de3db2cfdc765a7daf4aa142433f5e99a255dc123035a929b57868d3941ddda400a3c2b1db31cb

diff --git a/sci-misc/llama-cpp/llama-cpp-0_pre6318.ebuild b/sci-misc/llama-cpp/llama-cpp-0_pre7924.ebuild
similarity index 64%
rename from sci-misc/llama-cpp/llama-cpp-0_pre6318.ebuild
rename to sci-misc/llama-cpp/llama-cpp-0_pre7924.ebuild
index acbdfc0735..ff61000fd0 100644
--- a/sci-misc/llama-cpp/llama-cpp-0_pre6318.ebuild
+++ b/sci-misc/llama-cpp/llama-cpp-0_pre7924.ebuild
@@ -1,4 +1,4 @@
-# Copyright 2025 Gentoo Authors
+# Copyright 2026 Gentoo Authors
 # Distributed under the terms of the GNU General Public License v2
 
 EAPI=8
@@ -7,24 +7,45 @@ ROCM_VERSION="6.3"
 
 inherit cmake cuda rocm linux-info
 
-if [[ "${PV}" != "9999" ]]; then
-       KEYWORDS="~amd64"
-       MY_PV="b${PV#0_pre}"
-       S="${WORKDIR}/llama.cpp-${MY_PV}"
-       SRC_URI="https://github.com/ggml-org/llama.cpp/archive/refs/tags/${MY_PV}.tar.gz -> ${P}.tar.gz"
-else
+TINY_LLAMAS_COMMIT="99dd1a73db5a37100bd4ae633f4cfce6560e1567"
+
+DESCRIPTION="Port of Facebook's LLaMA model in C/C++"
+HOMEPAGE="https://github.com/ggml-org/llama.cpp"
+
+if [[ ${PV} == *9999* ]]; then
        inherit git-r3
        EGIT_REPO_URI="https://github.com/ggml-org/llama.cpp.git"
+else
+       MY_PV="b${PV#0_pre}"
+       SRC_URI="https://github.com/ggml-org/llama.cpp/archive/refs/tags/${MY_PV}.tar.gz -> ${P}.tar.gz"
+       S="${WORKDIR}/llama.cpp-${MY_PV}"
+       KEYWORDS="~amd64"
 fi
 
-DESCRIPTION="Port of Facebook's LLaMA model in C/C++"
-HOMEPAGE="https://github.com/ggml-org/llama.cpp"
+SRC_URI+="
+       examples? (
+               https://huggingface.co/ggml-org/tiny-llamas/resolve/${TINY_LLAMAS_COMMIT}/stories15M-q4_0.gguf
+                       -> ggml-org_models_tinyllamas_stories15M-q4_0-${TINY_LLAMAS_COMMIT}.gguf
+       )
+"
 
 LICENSE="MIT"
 SLOT="0"
 CPU_FLAGS_X86=( avx avx2 f16c )
-IUSE="curl openblas +openmp blis hip cuda opencl vulkan"
-REQUIRED_USE="?? ( openblas blis )"
+
+# wmma USE explained here: https://github.com/ggml-org/llama.cpp/blob/master/docs/build.md#hip
+IUSE="curl openblas +openmp blis rocm cuda opencl vulkan flexiblas wmma examples"
+
+REQUIRED_USE="
+       ?? (
+               openblas
+               blis
+               flexiblas
+       )
+       wmma? (
+               rocm
+       )
+"
 
 # curl is needed for pulling models from huggingface
 # numpy is used by convert_hf_to_gguf.py
@@ -33,8 +54,13 @@ CDEPEND="
        openblas? ( sci-libs/openblas:= )
        openmp? ( llvm-runtimes/openmp:= )
        blis? ( sci-libs/blis:= )
-       hip? ( >=dev-util/hip-6.3:=
-               >=sci-libs/hipBLAS-6.3:=
+       flexiblas? ( sci-libs/flexiblas:= )
+       rocm? (
+               >=dev-util/hip-${ROCM_VERSION}:=
+               >=sci-libs/hipBLAS-${ROCM_VERSION}:=
+               wmma? (
+                       >=sci-libs/rocWMMA-${ROCM_VERSION}:=
+               )
        )
        cuda? ( dev-util/nvidia-cuda-toolkit:= )
 "
@@ -50,37 +76,41 @@ RDEPEND="${CDEPEND}
 BDEPEND="media-libs/shaderc"
 
 pkg_setup() {
-       if use hip; then
+       if use rocm; then
                linux-info_pkg_setup
                if linux-info_get_any_version && linux_config_exists; then
                        if ! linux_chkconfig_present HSA_AMD_SVM; then
                                ewarn "To use ROCm/HIP, you need to have HSA_AMD_SVM option enabled in your kernel."
                        fi
                fi
-
        fi
 }
 
 src_prepare() {
        use cuda && cuda_src_prepare
-
        cmake_src_prepare
+       if use examples; then
+               mkdir -p "${BUILD_DIR}/tinyllamas" || die
+               cp "${DISTDIR}/ggml-org_models_tinyllamas_stories15M-q4_0-${TINY_LLAMAS_COMMIT}.gguf" \
+                       "${BUILD_DIR}/tinyllamas/stories15M-q4_0.gguf" || die
+       fi
 }
 
 src_configure() {
        local mycmakeargs=(
                -DLLAMA_BUILD_TESTS=OFF
+               -DLLAMA_BUILD_EXAMPLES=$(usex examples)
                -DLLAMA_BUILD_SERVER=ON
                -DCMAKE_SKIP_BUILD_RPATH=ON
                -DGGML_NATIVE=0 # don't set march
                -DGGML_RPC=ON
-               -DLLAMA_CURL=$(usex curl ON OFF)
+               -DLLAMA_CURL=$(usex curl)
                -DBUILD_NUMBER="1"
                -DGENTOO_REMOVE_CMAKE_BLAS_HACK=ON
-               -DGGML_CUDA=$(usex cuda ON OFF)
-               -DGGML_OPENCL=$(usex opencl ON OFF)
-               -DGGML_OPENMP=$(usex openmp ON OFF)
-               -DGGML_VULKAN=$(usex vulkan ON OFF)
+               -DGGML_CUDA=$(usex cuda)
+               -DGGML_OPENCL=$(usex opencl)
+               -DGGML_OPENMP=$(usex openmp)
+               -DGGML_VULKAN=$(usex vulkan)
 
                # avoid clashing with whisper.cpp
                -DCMAKE_INSTALL_LIBDIR="${EPREFIX}/usr/$(get_libdir)/llama.cpp"
@@ -99,6 +129,12 @@ src_configure() {
                )
        fi
 
+       if use flexiblas; then
+               mycmakeargs+=(
+                       -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FlexiBLAS
+               )
+       fi
+
        if use cuda; then
                local -x CUDAHOSTCXX="$(cuda_gccdir)"
                # tries to recreate dev symlinks
@@ -106,10 +142,11 @@ src_configure() {
                addpredict "/dev/char/"
        fi
 
-       if use hip; then
+       if use rocm; then
                rocm_use_hipcc
                mycmakeargs+=(
                        -DGGML_HIP=ON -DAMDGPU_TARGETS=$(get_amdgpu_flags)
+                       -DGGML_HIP_ROCWMMA_FATTN=$(usex wmma)
                )
        fi
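
For reference, a minimal sketch of how a user might pull in this revision with the renamed and new USE flags (hypothetical user-side Portage config, not part of this commit; it assumes the GURU overlay is already enabled, and per REQUIRED_USE at most one of openblas/blis/flexiblas may be set and wmma requires rocm):

    # accept the ~amd64 keyword for the new revision
    echo "sci-misc/llama-cpp ~amd64" >> /etc/portage/package.accept_keywords/llama-cpp
    # "hip" is now "rocm"; wmma additionally pulls in rocWMMA for the flash-attention path
    echo "sci-misc/llama-cpp curl rocm wmma examples" >> /etc/portage/package.use/llama-cpp
    emerge --ask "=sci-misc/llama-cpp-0_pre7924"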
 
