🔖 Bump ik-llama.cpp-cuda version to r3826.ae0ba31f-1

This commit is contained in:
Orion 2025-08-01 10:02:12 +00:00
parent b8914f6ee8
commit 5750f47f82
2 changed files with 16 additions and 0 deletions

View file

@@ -15,6 +15,11 @@ pkgbase = ik-llama.cpp-cuda
depends = gcc-libs
depends = glibc
depends = python
optdepends = python-numpy: needed for convert_hf_to_gguf.py
optdepends = python-safetensors: needed for convert_hf_to_gguf.py
optdepends = python-sentencepiece: needed for convert_hf_to_gguf.py
optdepends = python-pytorch: needed for convert_hf_to_gguf.py
optdepends = python-transformers: needed for convert_hf_to_gguf.py
provides = llama.cpp
conflicts = libggml
conflicts = ggml
@@ -23,6 +28,8 @@ pkgbase = ik-llama.cpp-cuda
conflicts = llama.cpp-cuda
conflicts = llama.cpp-hip
conflicts = ik-llama.cpp
conflicts = ik-llama.cpp-hip
conflicts = ik-llama.cpp-vulkan
options = lto
options = !debug

View file

@@ -20,6 +20,13 @@ makedepends=(
cmake
git
)
optdepends=(
'python-numpy: needed for convert_hf_to_gguf.py'
'python-safetensors: needed for convert_hf_to_gguf.py'
'python-sentencepiece: needed for convert_hf_to_gguf.py'
'python-pytorch: needed for convert_hf_to_gguf.py'
'python-transformers: needed for convert_hf_to_gguf.py'
)
conflicts=(
libggml
ggml
@@ -28,6 +35,8 @@ conflicts=(
llama.cpp-cuda
llama.cpp-hip
ik-llama.cpp
ik-llama.cpp-hip
ik-llama.cpp-vulkan
)
provides=(llama.cpp)