🔖 Bump ik-llama.cpp version to r3826.ae0ba31f-1

This commit is contained in:
Orion 2025-08-01 10:02:05 +00:00
parent df99b5037a
commit 39cde2a1e3
2 changed files with 17 additions and 1 deletion

View file

@ -16,6 +16,11 @@ pkgbase = ik-llama.cpp
depends = gcc-libs
depends = glibc
depends = python
optdepends = python-numpy: needed for convert_hf_to_gguf.py
optdepends = python-safetensors: needed for convert_hf_to_gguf.py
optdepends = python-sentencepiece: needed for convert_hf_to_gguf.py
optdepends = python-pytorch: needed for convert_hf_to_gguf.py
optdepends = python-transformers: needed for convert_hf_to_gguf.py
provides = llama.cpp
conflicts = libggml
conflicts = ggml
@ -24,6 +29,8 @@ pkgbase = ik-llama.cpp
conflicts = llama.cpp-cuda
conflicts = llama.cpp-hip
conflicts = ik-llama.cpp-cuda
conflicts = ik-llama.cpp-hip
conflicts = ik-llama.cpp-vulkan
options = lto
options = !debug

View file

@ -21,6 +21,13 @@ makedepends=(
cmake
git
)
optdepends=(
'python-numpy: needed for convert_hf_to_gguf.py'
'python-safetensors: needed for convert_hf_to_gguf.py'
'python-sentencepiece: needed for convert_hf_to_gguf.py'
'python-pytorch: needed for convert_hf_to_gguf.py'
'python-transformers: needed for convert_hf_to_gguf.py'
)
conflicts=(
libggml
ggml
@ -29,6 +36,8 @@ conflicts=(
llama.cpp-cuda
llama.cpp-hip
ik-llama.cpp-cuda
ik-llama.cpp-hip
ik-llama.cpp-vulkan
)
provides=(llama.cpp)
@ -64,7 +73,7 @@ build() {
-DGGML_BLAS_VENDOR=OpenBLAS
-DGGML_LTO=ON
-DGGML_RPC=ON
-DGGML_NATEVE=ON
-DGGML_NATIVE=ON
-Wno-dev
)
cmake "${_cmake_options[@]}"