🔖 Bump llama.cpp-vulkan version to b6106-1

This commit is contained in:
Orion 2025-08-07 04:22:27 +00:00
parent a1f642c1d1
commit 52f79c5a51
2 changed files with 5 additions and 3 deletions

View file

@@ -1,6 +1,6 @@
pkgbase = llama.cpp-vulkan
pkgdesc = Port of Facebook's LLaMA model in C/C++ (with Vulkan GPU optimizations)
-pkgver = b6081
+pkgver = b6106
pkgrel = 1
url = https://github.com/ggerganov/llama.cpp
arch = x86_64

View file

@@ -3,7 +3,7 @@
pkgname=llama.cpp-vulkan
_pkgname=${pkgname%%-vulkan}
-pkgver=b6081
+pkgver=b6106
pkgrel=1
pkgdesc="Port of Facebook's LLaMA model in C/C++ (with Vulkan GPU optimizations)"
arch=(x86_64 armv7h aarch64)
@@ -66,7 +66,9 @@ build() {
-DGGML_BUILD_TESTS=OFF
-DGGML_LTO=ON
-DGGML_RPC=ON
- -DGGML_VULKAN=1
+ -DGGML_VULKAN=ON
+ -DGGML_CUDA_FA_ALL_QUANTS=ON
+ -DGGML_NATIVE=ON
-Wno-dev
)
cmake "${_cmake_options[@]}"