🔖 Bump llama.cpp-vulkan version to b7703-1

Orion 2026-01-11 14:17:41 +00:00
parent 2aaf170600
commit 581d7f4d28
2 changed files with 11 additions and 5 deletions

.SRCINFO

@@ -1,6 +1,6 @@
 pkgbase = llama.cpp-vulkan
 	pkgdesc = Port of Facebook's LLaMA model in C/C++ (with Vulkan GPU optimizations)
-	pkgver = b7699
+	pkgver = b7703
 	pkgrel = 1
 	url = https://github.com/ggml-org/llama.cpp
 	arch = x86_64
@@ -29,10 +29,10 @@ pkgbase = llama.cpp-vulkan
 	options = lto
 	options = !debug
 	backup = etc/conf.d/llama.cpp
-	source = llama.cpp-vulkan-b7699.tar.gz::https://github.com/ggml-org/llama.cpp/archive/refs/tags/b7699.tar.gz
+	source = llama.cpp-vulkan-b7703.tar.gz::https://github.com/ggml-org/llama.cpp/archive/refs/tags/b7703.tar.gz
 	source = https://raw.githubusercontent.com/Orion-zhen/aur-packages/refs/heads/main/assets/llama.cpp/llama.cpp.service
 	source = https://raw.githubusercontent.com/Orion-zhen/aur-packages/refs/heads/main/assets/llama.cpp/llama.cpp.conf
-	sha256sums = bd6e21f1827080a98cd28db561851c41b966c9ed5b4e2b7a0cb6686d817fc55c
+	sha256sums = 809cdaa5859f1b05fdb198b07fcdbcd4ee23ce9ff91d006e5c4b19bf91820308
 	sha256sums = 0377d08a07bda056785981d3352ccd2dbc0387c4836f91fb73e6b790d836620d
 	sha256sums = e4856f186f69cd5dbfcc4edec9f6b6bd08e923bceedd8622eeae1a2595beb2ec
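
.SRCINFO is generated from the PKGBUILD rather than edited by hand, so its pkgver, source, and sha256sums lines above simply track the PKGBUILD changes below. A minimal sketch of the standard regeneration step (part of the usual makepkg workflow, not shown in this commit):

# Regenerate .SRCINFO after editing the PKGBUILD
makepkg --printsrcinfo > .SRCINFO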

PKGBUILD

@@ -3,7 +3,7 @@
 pkgname=llama.cpp-vulkan
 _pkgname=${pkgname%%-vulkan}
-pkgver=b7699
+pkgver=b7703
 pkgrel=1
 pkgdesc="Port of Facebook's LLaMA model in C/C++ (with Vulkan GPU optimizations)"
 arch=(x86_64 armv7h aarch64)
@@ -38,7 +38,7 @@ source=(
   "https://raw.githubusercontent.com/Orion-zhen/aur-packages/refs/heads/main/assets/llama.cpp/llama.cpp.service"
   "https://raw.githubusercontent.com/Orion-zhen/aur-packages/refs/heads/main/assets/llama.cpp/llama.cpp.conf"
 )
-sha256sums=('bd6e21f1827080a98cd28db561851c41b966c9ed5b4e2b7a0cb6686d817fc55c'
+sha256sums=('809cdaa5859f1b05fdb198b07fcdbcd4ee23ce9ff91d006e5c4b19bf91820308'
             '0377d08a07bda056785981d3352ccd2dbc0387c4836f91fb73e6b790d836620d'
             'e4856f186f69cd5dbfcc4edec9f6b6bd08e923bceedd8622eeae1a2595beb2ec')
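
Only the first checksum changes: it covers the version-pinned source tarball, while the service and conf assets are untouched. A sketch of refreshing the array with updpkgsums from pacman-contrib (an assumed maintainer workflow, not shown in this commit):

# Download the sources and rewrite the sha256sums=() array in place
updpkgsums PKGBUILD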
@@ -83,6 +83,12 @@ build() {
     )
   fi
+
+  # Allow users to pass custom build options
+  if [[ -n "$LLAMA_BUILD_EXTRA_ARGS" ]]; then
+    msg2 "Applied custom CMake build args: $LLAMA_BUILD_EXTRA_ARGS"
+    _cmake_options+=($LLAMA_BUILD_EXTRA_ARGS)
+  fi
 
   cmake "${_cmake_options[@]}"
   cmake --build build -- -j $(nproc)
 }
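
The new block lets users inject extra CMake options into the build; the unquoted $LLAMA_BUILD_EXTRA_ARGS expansion is deliberate, so a space-separated string splits into individual arguments. A hypothetical invocation (-DGGML_NATIVE=ON is an upstream llama.cpp CMake option, used here purely as an example):

# Forward custom CMake flags into the PKGBUILD's build() step
LLAMA_BUILD_EXTRA_ARGS="-DGGML_NATIVE=ON" makepkg -si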