🔖 Bump llama.cpp-hip version to b6106-1

This commit is contained in:
Orion 2025-08-07 04:22:29 +00:00
parent a724cef2f1
commit 1efe5d34a9
2 changed files with 4 additions and 2 deletions

View file

@@ -1,6 +1,6 @@
pkgbase = llama.cpp-hip
pkgdesc = Port of Facebook's LLaMA model in C/C++ (with AMD ROCm optimizations)
-pkgver = b6081
+pkgver = b6106
pkgrel = 1
url = https://github.com/ggml-org/llama.cpp
arch = x86_64

View file

@@ -3,7 +3,7 @@
pkgname=llama.cpp-hip
_pkgname="${pkgname%-hip}"
-pkgver=b6081
+pkgver=b6106
pkgrel=1
pkgdesc="Port of Facebook's LLaMA model in C/C++ (with AMD ROCm optimizations)"
arch=(x86_64 armv7h aarch64)
@@ -71,7 +71,9 @@ build() {
-DGGML_LTO=ON
-DGGML_RPC=ON
-DGGML_HIP=ON
-DGGML_HIP_GRAPHS=ON
-DGGML_CUDA_FA_ALL_QUANTS=ON
-DGGML_NATIVE=ON
-Wno-dev
)
cmake "${_cmake_options[@]}"