commit:     ac646f42de00d0fcada1326f887692fedef16879
Author:     Yahor Berdnikau <egorr.berd <AT> gmail <DOT> com>
AuthorDate: Sun Jan 26 09:02:59 2025 +0000
Commit:     Yahor Berdnikau <egorr.berd <AT> gmail <DOT> com>
CommitDate: Sun Jan 26 09:02:59 2025 +0000
URL:        https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=ac646f42

app-misc/ollama: Fixes build due to the upstream build system changes

Bug: https://bugs.gentoo.org/946268
Signed-off-by: Yahor Berdnikau <egorr.berd <AT> gmail.com>

 app-misc/ollama/metadata.xml       |  3 +-
 app-misc/ollama/ollama-9999.ebuild | 76 +++++++++++++++++++++++++++++---------
 2 files changed, 60 insertions(+), 19 deletions(-)

diff --git a/app-misc/ollama/metadata.xml b/app-misc/ollama/metadata.xml
index 352043225..4ebf01228 100644
--- a/app-misc/ollama/metadata.xml
+++ b/app-misc/ollama/metadata.xml
@@ -7,8 +7,7 @@
                <description>A copy from 
https://github.com/MrPenguin07/ebuilds/tree/master/dev-ml/ollama</description>
        </maintainer>
        <use>
-               <flag name="nvidia">Add support of nvidia</flag>
-               <flag name="amd">Add support of amd</flag>
+               <flag name="cuda">Enable NVIDIA CUDA support</flag>
        </use>
        <upstream>
                <remote-id type="github">ollama/ollama</remote-id>

diff --git a/app-misc/ollama/ollama-9999.ebuild b/app-misc/ollama/ollama-9999.ebuild
index c45ce6165..f0d2f0be2 100644
--- a/app-misc/ollama/ollama-9999.ebuild
+++ b/app-misc/ollama/ollama-9999.ebuild
@@ -1,9 +1,10 @@
-# Copyright 2024 Gentoo Authors
+# Copyright 2024-2025 Gentoo Authors
 # Distributed under the terms of the GNU General Public License v2
 
 EAPI=8
 
-inherit git-r3 go-module
+ROCM_VERSION=6.1
+inherit git-r3 go-module rocm
 
 DESCRIPTION="Get up and running with Llama 3, Mistral, Gemma, and other 
language models."
 HOMEPAGE="https://ollama.com"
@@ -11,7 +12,18 @@ EGIT_REPO_URI="https://github.com/ollama/ollama.git"
 LICENSE="MIT"
 SLOT="0"
 
-IUSE="nvidia amd"
+IUSE="cuda video_cards_amdgpu
+cpu_flags_x86_avx cpu_flags_x86_avx2
+cpu_flags_x86_avx512f cpu_flags_x86_avx512vbmi cpu_flags_x86_avx512_vnni 
cpu_flags_x86_avx512_bf16
+"
+
+REQUIRED_USE="
+       cpu_flags_x86_avx2? ( cpu_flags_x86_avx )
+       cpu_flags_x86_avx512f? ( cpu_flags_x86_avx2 )
+       cpu_flags_x86_avx512vbmi? ( cpu_flags_x86_avx512f )
+       cpu_flags_x86_avx512_vnni? ( cpu_flags_x86_avx512f )
+       cpu_flags_x86_avx512_bf16? ( cpu_flags_x86_avx512f )
+"
 
 RDEPEND="
        acct-group/ollama
@@ -22,15 +34,14 @@ BDEPEND="
        >=dev-lang/go-1.23.4
        >=dev-build/cmake-3.24
        >=sys-devel/gcc-11.4.0
-       nvidia? ( dev-util/nvidia-cuda-toolkit )
-       amd? (
-               sci-libs/clblast
-               dev-libs/rocm-opencl-runtime
+       cuda? ( dev-util/nvidia-cuda-toolkit )
+       video_cards_amdgpu? (
+               =sci-libs/hipBLAS-${ROCM_VERSION}*
        )
 "
 
 pkg_pretend() {
-       if use amd; then
+       if use video_cards_amdgpu || use cuda; then
                ewarn "WARNING: AMD & Nvidia support in this ebuild are 
experimental"
                einfo "If you run into issues, especially compiling 
dev-libs/rocm-opencl-runtime"
                einfo "you may try the docker image here 
https://github.com/ROCm/ROCm-docker"
@@ -44,20 +55,51 @@ src_unpack() {
        go-module_live_vendor
 }
 
+src_prepare() {
+       default
+
+       if use video_cards_amdgpu; then
+               # --hip-version gets appended to the compile flags which isn't 
a known flag.
+               # This causes rocm builds to fail because 
-Wunused-command-line-argument is turned on.
+               # Use nuclear option to fix this.
+               # Disable -Werror's from go modules.
+               find "${S}" -name "*.go" -exec sed -i "s/ -Werror / /g" {} + || 
die
+       fi
+}
+
 src_compile() {
-       VERSION=$(
-               git describe --tags --first-parent --abbrev=7 --long --dirty 
--always \
-               | sed -e "s/^v//g"
-               assert
-       )
-       export GOFLAGS="'-ldflags=-w -s 
\"-X=github.com/ollama/ollama/version.Version=${VERSION}\"'"
+       CUSTOM_CPU_FLAGS=""
+       use cpu_flags_x86_avx && CUSTOM_CPU_FLAGS+="avx"
+       use cpu_flags_x86_avx2 && CUSTOM_CPU_FLAGS+=",avx2"
+       use cpu_flags_x86_avx512f && CUSTOM_CPU_FLAGS+=",avx512"
+       use cpu_flags_x86_avx512vbmi && CUSTOM_CPU_FLAGS+=",avx512vbmi"
+       use cpu_flags_x86_avx512_vnni && CUSTOM_CPU_FLAGS+=",avx512vnni"
+       use cpu_flags_x86_avx512_bf16 && CUSTOM_CPU_FLAGS+=",avx512bf16"
 
-       ego generate ./...
-       ego build .
+       # Build basic ollama executable with cpu features built in
+       export CUSTOM_CPU_FLAGS
+
+       if use video_cards_amdgpu; then
+               export HIP_ARCHS=$(get_amdgpu_flags)
+               export HIP_PATH="/usr"
+       else
+               export OLLAMA_SKIP_ROCM_GENERATE=1
+       fi
+
+       if ! use cuda; then
+               export OLLAMA_SKIP_CUDA_GENERATE=1
+       fi
+       emake dist
 }
 
 src_install() {
-       dobin ollama
+       dobin dist/linux-${ARCH}/bin/ollama
+
+       if [[ -d "dist/linux-${ARCH}/lib/ollama" ]] ; then
+               insinto /usr/lib
+               doins -r dist/linux-${ARCH}/lib/ollama
+       fi
+
        doinitd "${FILESDIR}"/ollama
 }
 

Reply via email to