# Maintainer: Orion-zhen <https://github.com/Orion-zhen>
# Contributor: txtsd <aur.archlinux@ihavea.quest>

pkgname=llama.cpp-vulkan
_pkgname=${pkgname%%-vulkan}
pkgver=b8340
pkgrel=1
pkgdesc="Port of Facebook's LLaMA model in C/C++ (with Vulkan GPU optimizations)"
arch=(x86_64 armv7h aarch64)
url='https://github.com/ggml-org/llama.cpp'
license=('MIT')
depends=(
    curl
    gcc-libs
    glibc
    python
    vulkan-icd-loader
)
makedepends=(
    cmake
    git
    shaderc
    vulkan-headers
)
optdepends=(
    'python-numpy: needed for convert_hf_to_gguf.py'
    'python-safetensors: needed for convert_hf_to_gguf.py'
    'python-sentencepiece: needed for convert_hf_to_gguf.py'
    'python-pytorch: needed for convert_hf_to_gguf.py'
    'python-transformers: needed for convert_hf_to_gguf.py'
    'python-gguf: needed for convert_hf_to_gguf.py'
)
provides=(${_pkgname})
conflicts=(${_pkgname} libggml ggml stable-diffusion.cpp)
options=(lto !debug)
backup=("etc/conf.d/llama.cpp")
source=(
    "${pkgname}-${pkgver}.tar.gz::https://github.com/ggml-org/llama.cpp/archive/refs/tags/${pkgver}.tar.gz"
    "https://raw.githubusercontent.com/Orion-zhen/aur-packages/refs/heads/main/assets/llama.cpp/llama.cpp.service"
    "https://raw.githubusercontent.com/Orion-zhen/aur-packages/refs/heads/main/assets/llama.cpp/llama.cpp.conf"
)
sha256sums=('65a47ecec6325913034e44035f9685db992d6d2f47ef74361d06004716812718'
            '0377d08a07bda056785981d3352ccd2dbc0387c4836f91fb73e6b790d836620d'
            'e4856f186f69cd5dbfcc4edec9f6b6bd08e923bceedd8622eeae1a2595beb2ec')
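
# Maintenance note (suggested workflow, not part of the build): after bumping
# pkgver, the checksums above can be refreshed with updpkgsums from
# pacman-contrib, e.g. `updpkgsums PKGBUILD`.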

prepare() {
    # Symlink the extracted tarball directory to a stable name; -n avoids
    # descending into a stale symlink left over from a previous run
    ln -sfn "${_pkgname}-${pkgver}" "${_pkgname}"
}

build() {
    local _cmake_options=(
        -B build
        -S "${_pkgname}"
        -DCMAKE_BUILD_TYPE=Release
        -DCMAKE_INSTALL_PREFIX='/usr'
        -DBUILD_SHARED_LIBS=ON
        -DLLAMA_BUILD_TESTS=OFF
        -DLLAMA_USE_SYSTEM_GGML=OFF
        -DGGML_ALL_WARNINGS=OFF
        -DGGML_ALL_WARNINGS_3RD_PARTY=OFF
        -DGGML_BUILD_EXAMPLES=OFF
        -DGGML_BUILD_TESTS=OFF
        -DGGML_LTO=ON
        -DGGML_RPC=ON
        -DGGML_VULKAN=ON
        -DGGML_CUDA_FA_ALL_QUANTS=ON
        -DLLAMA_BUILD_NUMBER="${pkgver#b}" # fix the build number (strip the leading "b")
        -Wno-dev
    )

    # Check whether we are building in a CI environment
    if [ -n "$CI" ] && [ "$CI" != 0 ]; then
        msg2 "CI = $CI detected, building universal package"
        # Enable a universal build that runs on any CPU variant
        _cmake_options+=(
            -DGGML_BACKEND_DL=ON
            -DGGML_CPU_ALL_VARIANTS=ON
            -DGGML_NATIVE=OFF
            # -DGGML_ZENDNN=ON
        )
    else
        # Local build: optimize for the current machine
        _cmake_options+=(
            -DGGML_NATIVE=ON
        )
    fi
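    # Note: a universal build can also be forced locally, e.g. with `CI=1 makepkg`.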

    # Allow users to customize build options
    if [[ -n "$LLAMA_BUILD_EXTRA_ARGS" ]]; then
        msg2 "Applied custom CMake build args: $LLAMA_BUILD_EXTRA_ARGS"
        # intentionally unquoted so space-separated flags split into array items
        _cmake_options+=($LLAMA_BUILD_EXTRA_ARGS)
    fi
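    # Usage sketch: any option above can be overridden at build time, e.g.
    #   LLAMA_BUILD_EXTRA_ARGS="-DGGML_RPC=OFF -DGGML_LTO=OFF" makepkg -si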

    cmake "${_cmake_options[@]}"
    cmake --build build -- -j $(nproc)
}

package() {
    DESTDIR="${pkgdir}" cmake --install build

    install -Dm644 "${_pkgname}/LICENSE" "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
    install -Dm644 "llama.cpp.conf" "${pkgdir}/etc/conf.d/llama.cpp"
    install -Dm644 "llama.cpp.service" "${pkgdir}/usr/lib/systemd/system/llama.cpp.service"

    msg2 "llama.cpp.service is now available"
    msg2 "llama-server arguments are in /etc/conf.d/llama.cpp"
}
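
# Post-install sketch (assumed usage, not enforced by this package): adjust the
# server arguments in /etc/conf.d/llama.cpp, then start the bundled unit with
#   systemctl enable --now llama.cpp.service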