commit:     0e23d436589a5ed76dd10ff0e35964f691b809c7
Author:     Paul Zander <negril.nx+gentoo <AT> gmail <DOT> com>
AuthorDate: Sat Feb  8 19:29:18 2025 +0000
Commit:     Florian Schmaus <flow <AT> gentoo <DOT> org>
CommitDate: Sat Feb  8 21:42:29 2025 +0000
URL:        https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=0e23d436

app-misc/ollama: update to 0.5.7-r1

Bug: https://bugs.gentoo.org/948424
Bug: https://bugs.gentoo.org/935842
Closes: https://bugs.gentoo.org/920301
Signed-off-by: Paul Zander <negril.nx+gentoo <AT> gmail.com>

 .../files/ollama-0.5.7-include-cstdint.patch       |  24 +++
 app-misc/ollama/files/{ollama => ollama.init}      |   2 +
 app-misc/ollama/metadata.xml                       |   1 +
 app-misc/ollama/ollama-0.5.7-r1.ebuild             | 176 +++++++++++++++++++++
 app-misc/ollama/ollama-0.5.7.ebuild                | 115 --------------
 5 files changed, 203 insertions(+), 115 deletions(-)

diff --git a/app-misc/ollama/files/ollama-0.5.7-include-cstdint.patch b/app-misc/ollama/files/ollama-0.5.7-include-cstdint.patch
new file mode 100644
index 000000000..14975ca42
--- /dev/null
+++ b/app-misc/ollama/files/ollama-0.5.7-include-cstdint.patch
@@ -0,0 +1,24 @@
+From d711567ba482e80520b5cc36026c80f55f721319 Mon Sep 17 00:00:00 2001
+From: Paul Zander <[email protected]>
+Date: Sat, 25 Jan 2025 19:00:31 +0100
+Subject: [PATCH] include cstdint
+
+---
+ llama/llama-mmap.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/llama/llama-mmap.h b/llama/llama-mmap.h
+index ebd7dc16..4c8e3929 100644
+--- a/llama/llama-mmap.h
++++ b/llama/llama-mmap.h
+@@ -26,6 +26,7 @@
+ 
+ #pragma once
+ 
++#include <cstdint>
+ #include <memory>
+ #include <vector>
+ 
+-- 
+2.48.0
+

diff --git a/app-misc/ollama/files/ollama b/app-misc/ollama/files/ollama.init
similarity index 89%
rename from app-misc/ollama/files/ollama
rename to app-misc/ollama/files/ollama.init
index 9359f48a1..17e632899 100644
--- a/app-misc/ollama/files/ollama
+++ b/app-misc/ollama/files/ollama.init
@@ -1,4 +1,6 @@
 #!/sbin/openrc-run
+# Copyright 1999-2025 Gentoo Authors
+# Distributed under the terms of the GNU General Public License, v2
 
 description="Ollama Service"
 command="/usr/bin/ollama"

diff --git a/app-misc/ollama/metadata.xml b/app-misc/ollama/metadata.xml
index 4ebf01228..cb4ddec8a 100644
--- a/app-misc/ollama/metadata.xml
+++ b/app-misc/ollama/metadata.xml
@@ -8,6 +8,7 @@
        </maintainer>
        <use>
                <flag name="cuda">Enable NVIDIA CUDA support</flag>
+               <flag name="rocm">Enable ROCm gpu computing support</flag>
        </use>
        <upstream>
                <remote-id type="github">ollama/ollama</remote-id>

diff --git a/app-misc/ollama/ollama-0.5.7-r1.ebuild b/app-misc/ollama/ollama-0.5.7-r1.ebuild
new file mode 100644
index 000000000..e95741936
--- /dev/null
+++ b/app-misc/ollama/ollama-0.5.7-r1.ebuild
@@ -0,0 +1,176 @@
+# Copyright 2024-2025 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+ROCM_VERSION=6.1
+inherit cuda rocm
+inherit go-module
+
+DESCRIPTION="Get up and running with Llama 3, Mistral, Gemma, and other language models."
+HOMEPAGE="https://ollama.com"
+
+if [[ ${PV} == *9999* ]]; then
+       inherit git-r3
+       EGIT_REPO_URI="https://github.com/ollama/ollama.git"
+else
+       SRC_URI="
+               https://github.com/ollama/${PN}/archive/refs/tags/v${PV}.tar.gz -> ${P}.gh.tar.gz
+               https://github.com/Tapchicoma/ebuild-deps/raw/refs/heads/main/go-deps/${PN}-${PV}-deps.tar.xz
+       "
+       KEYWORDS="~amd64"
+fi
+
+LICENSE="MIT"
+SLOT="0"
+
+X86_CPU_FLAGS=(
+       avx
+       avx2
+       avx512f
+       avx512vbmi
+       avx512_vnni
+       avx512_bf16
+)
+CPU_FLAGS=( "${X86_CPU_FLAGS[@]/#/cpu_flags_x86_}" )
+IUSE="${CPU_FLAGS[*]} cuda rocm"
+
+REQUIRED_USE="
+       cpu_flags_x86_avx2? ( cpu_flags_x86_avx )
+       cpu_flags_x86_avx512f? ( cpu_flags_x86_avx2 )
+       cpu_flags_x86_avx512vbmi? ( cpu_flags_x86_avx512f )
+       cpu_flags_x86_avx512_vnni? ( cpu_flags_x86_avx512f )
+       cpu_flags_x86_avx512_bf16? ( cpu_flags_x86_avx512f )
+"
+
+DEPEND="
+       >=dev-lang/go-1.23.4
+       cuda? (
+               dev-util/nvidia-cuda-toolkit:=
+       )
+       rocm? (
+               >=sci-libs/hipBLAS-${ROCM_VERSION}:=[${ROCM_USEDEP}]
+       )
+"
+
+RDEPEND="
+       acct-group/${PN}
+       acct-user/${PN}
+"
+
+PATCHES=(
+       "${FILESDIR}/${PN}-0.5.7-include-cstdint.patch"
+)
+
+pkg_pretend() {
+       if use rocm; then
+               ewarn "WARNING: AMD support in this ebuild are experimental"
+               einfo "If you run into issues, especially compiling dev-libs/rocm-opencl-runtime"
+               einfo "you may try the docker image here https://github.com/ROCm/ROCm-docker"
+               einfo "and follow instructions here"
+               einfo "https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html"
+       fi
+}
+
+src_prepare() {
+       default
+
+       sed \
+               -e "s/(CFLAGS)/(NVCCFLAGS)/g" \
+               -e "s/(CXXFLAGS)/(NVCCFLAGS)/g" \
+               -i make/cuda.make || die
+
+       if use rocm; then
+               # --hip-version gets appended to the compile flags which isn't a known flag.
+               # This causes rocm builds to fail because -Wunused-command-line-argument is turned on.
+               # Use nuclear option to fix this.
+               # Disable -Werror's from go modules.
+               find "${S}" -name ".go" -exec sed -i "s/ -Werror / /g" {} + || die
+       fi
+}
+
+src_configure() {
+       local CUSTOM_CPU_FLAGS=()
+       use cpu_flags_x86_avx && CUSTOM_CPU_FLAGS+=( "avx" )
+       use cpu_flags_x86_avx2 && CUSTOM_CPU_FLAGS+=( "avx2" )
+       use cpu_flags_x86_avx512f && CUSTOM_CPU_FLAGS+=( "avx512" )
+       use cpu_flags_x86_avx512vbmi && CUSTOM_CPU_FLAGS+=( "avx512vbmi" )
+       use cpu_flags_x86_avx512_vnni && CUSTOM_CPU_FLAGS+=( "avx512vnni" )
+       use cpu_flags_x86_avx512_bf16 && CUSTOM_CPU_FLAGS+=( "avx512bf16" )
+
+       # Build basic ollama executable with cpu features built in
+       emakeargs=(
+               # CCACHE=""
+               "CUSTOM_CPU_FLAGS=$( IFS=','; echo "${CUSTOM_CPU_FLAGS[*]}")"
+       )
+
+       if use cuda; then
+               export NVCC_CCBIN
+               NVCC_CCBIN="$(cuda_gccdir)"
+
+               if [[ -n ${CUDAARCHS} ]]; then
+                       emakeargs+=(
+                               CUDA_ARCHITECTURES="${CUDAARCHS}"
+                       )
+               fi
+
+               if has_version "=dev-util/nvidia-cuda-toolkit-12*"; then
+                       emakeargs+=(
+                               CUDA_12_COMPILER="${CUDA_PATH:=${EPREFIX}/opt/cuda}/bin/nvcc"
+                               CUDA_12_PATH="${CUDA_PATH:=${EPREFIX}/opt/cuda}"
+                       )
+               fi
+
+               if has_version "=dev-util/nvidia-cuda-toolkit-11*"; then
+                       emakeargs+=(
+                               CUDA_11_COMPILER="${CUDA_PATH:=${EPREFIX}/opt/cuda}/bin/nvcc"
+                               CUDA_11_PATH="${CUDA_PATH:=${EPREFIX}/opt/cuda}"
+                       )
+               fi
+
+               cuda_add_sandbox -w
+       else
+               emakeargs+=( OLLAMA_SKIP_CUDA_GENERATE="1" )
+       fi
+
+       if use rocm; then
+               emakeargs+=(
+                       HIP_ARCHS="$(get_amdgpu_flags)"
+                       HIP_PATH="${EPREFIX}/usr"
+               )
+
+               check_amdgpu
+       else
+               emakeargs+=( OLLAMA_SKIP_ROCM_GENERATE="1" )
+       fi
+
+       emake "${emakeargs[@]}" help-runners
+       export emakeargs
+}
+
+src_compile() {
+       emake "${emakeargs[@]}" dist
+}
+
+src_install() {
+       dobin "dist/linux-${ARCH}/bin/ollama"
+
+       if [[ -d "dist/linux-${ARCH}/lib/ollama" ]] ; then
+               insinto /usr/lib
+               doins -r "dist/linux-${ARCH}/lib/ollama"
+       fi
+
+       doinitd "${FILESDIR}"/ollama.init
+}
+
+pkg_preinst() {
+       keepdir /var/log/ollama
+       fowners ollama:ollama /var/log/ollama
+}
+
+pkg_postinst() {
+       einfo "Quick guide:"
+       einfo "ollama serve"
+       einfo "ollama run llama3:70b"
+       einfo "See available models at https://ollama.com/library"
+}

diff --git a/app-misc/ollama/ollama-0.5.7.ebuild b/app-misc/ollama/ollama-0.5.7.ebuild
deleted file mode 100644
index f1373d6fa..000000000
--- a/app-misc/ollama/ollama-0.5.7.ebuild
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright 2024-2025 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-ROCM_VERSION=6.1
-inherit go-module rocm
-
-DESCRIPTION="Get up and running with Llama 3, Mistral, Gemma, and other language models."
-HOMEPAGE="https://ollama.com"
-SRC_URI="https://github.com/ollama/${PN}/archive/refs/tags/v${PV}.tar.gz -> ${P}.gh.tar.gz"
-SRC_URI+=" https://github.com/Tapchicoma/ebuild-deps/raw/refs/heads/main/go-deps/${PN}-${PV}-deps.tar.xz"
-S="${WORKDIR}/${PN}-${PV}"
-LICENSE="MIT"
-SLOT="0"
-
-KEYWORDS="~amd64"
-
-IUSE="cuda video_cards_amdgpu
-cpu_flags_x86_avx cpu_flags_x86_avx2
-cpu_flags_x86_avx512f cpu_flags_x86_avx512vbmi cpu_flags_x86_avx512_vnni cpu_flags_x86_avx512_bf16
-"
-
-REQUIRED_USE="
-       cpu_flags_x86_avx2? ( cpu_flags_x86_avx )
-       cpu_flags_x86_avx512f? ( cpu_flags_x86_avx2 )
-       cpu_flags_x86_avx512vbmi? ( cpu_flags_x86_avx512f )
-       cpu_flags_x86_avx512_vnni? ( cpu_flags_x86_avx512f )
-       cpu_flags_x86_avx512_bf16? ( cpu_flags_x86_avx512f )
-"
-
-RDEPEND="
-       acct-group/ollama
-       acct-user/ollama
-"
-
-DEPEND="
-       >=dev-lang/go-1.23.4
-       >=dev-build/cmake-3.24
-       >=sys-devel/gcc-11.4.0
-       cuda? ( dev-util/nvidia-cuda-toolkit )
-       video_cards_amdgpu? (
-               sci-libs/hipBLAS[${ROCM_USEDEP}]
-       )
-"
-
-pkg_pretend() {
-       if use video_cards_amdgpu; then
-               ewarn "WARNING: AMD support in this ebuild are experimental"
-               einfo "If you run into issues, especially compiling dev-libs/rocm-opencl-runtime"
-               einfo "you may try the docker image here https://github.com/ROCm/ROCm-docker"
-               einfo "and follow instructions here"
-               einfo "https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html"
-       fi
-}
-
-src_prepare() {
-       default
-
-       if use video_cards_amdgpu; then
-               # --hip-version gets appended to the compile flags which isn't a known flag.
-               # This causes rocm builds to fail because -Wunused-command-line-argument is turned on.
-               # Use nuclear option to fix this.
-               # Disable -Werror's from go modules.
-               find "${S}" -name ".go" -exec sed -i "s/ -Werror / /g" {} + || die
-       fi
-}
-
-src_compile() {
-       CUSTOM_CPU_FLAGS=""
-       use cpu_flags_x86_avx && CUSTOM_CPU_FLAGS+="avx"
-       use cpu_flags_x86_avx2 && CUSTOM_CPU_FLAGS+=",avx2"
-       use cpu_flags_x86_avx512f && CUSTOM_CPU_FLAGS+=",avx512"
-       use cpu_flags_x86_avx512vbmi && CUSTOM_CPU_FLAGS+=",avx512vbmi"
-       use cpu_flags_x86_avx512_vnni && CUSTOM_CPU_FLAGS+=",avx512vnni"
-       use cpu_flags_x86_avx512_bf16 && CUSTOM_CPU_FLAGS+=",avx512bf16"
-
-       # Build basic ollama executable with cpu features built in
-       export CUSTOM_CPU_FLAGS
-
-       if use video_cards_amdgpu; then
-               export HIP_ARCHS=$(get_amdgpu_flags)
-               export HIP_PATH="/usr"
-       else
-               export OLLAMA_SKIP_ROCM_GENERATE=1
-       fi
-
-       if ! use cuda; then
-               export OLLAMA_SKIP_CUDA_GENERATE=1
-       fi
-       emake dist
-}
-
-src_install() {
-       dobin dist/linux-${ARCH}/bin/ollama
-
-       if [[ -d "dist/linux-${ARCH}/lib/ollama" ]] ; then
-               insinto /usr/lib
-               doins -r dist/linux-${ARCH}/lib/ollama
-       fi
-
-       doinitd "${FILESDIR}"/ollama
-}
-
-pkg_preinst() {
-       keepdir /var/log/ollama
-       fowners ollama:ollama /var/log/ollama
-}
-
-pkg_postinst() {
-       einfo "Quick guide:"
-       einfo "ollama serve"
-       einfo "ollama run llama3:70b"
-       einfo "See available models at https://ollama.com/library"
-}

Reply via email to