aur/.SRCINFO

pkgbase = llama.cpp-vulkan
	pkgdesc = Port of Facebook's LLaMA model in C/C++ (with Vulkan GPU optimizations)
	pkgver = b7555
	pkgrel = 1
	url = https://github.com/ggml-org/llama.cpp
	arch = x86_64
	arch = armv7h
	arch = aarch64
	license = MIT
	makedepends = cmake
	makedepends = git
	makedepends = shaderc
	makedepends = vulkan-headers
	depends = curl
	depends = gcc-libs
	depends = glibc
	depends = python
	depends = vulkan-icd-loader
	optdepends = python-numpy: needed for convert_hf_to_gguf.py
	optdepends = python-safetensors: needed for convert_hf_to_gguf.py
	optdepends = python-sentencepiece: needed for convert_hf_to_gguf.py
	optdepends = python-pytorch: needed for convert_hf_to_gguf.py
	optdepends = python-transformers: needed for convert_hf_to_gguf.py
	provides = llama.cpp
	conflicts = llama.cpp
	conflicts = libggml
	conflicts = ggml
	conflicts = stable-diffusion.cpp
	options = lto
	options = !debug
	backup = etc/conf.d/llama.cpp
	source = llama.cpp-vulkan-b7555.tar.gz::https://github.com/ggml-org/llama.cpp/archive/refs/tags/b7555.tar.gz
	source = https://raw.githubusercontent.com/Orion-zhen/aur-packages/refs/heads/main/assets/llama.cpp/llama.cpp.service
	source = https://raw.githubusercontent.com/Orion-zhen/aur-packages/refs/heads/main/assets/llama.cpp/llama.cpp.conf
	sha256sums = fa6b58b1da7120c1bd53e1f7d60a41d8c79683c587f271bb3bd248140f9a8a89
	sha256sums = 0377d08a07bda056785981d3352ccd2dbc0387c4836f91fb73e6b790d836620d
	sha256sums = e4856f186f69cd5dbfcc4edec9f6b6bd08e923bceedd8622eeae1a2595beb2ec

pkgname = llama.cpp-vulkan
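
# Note (not part of the .SRCINFO): a minimal sketch of the usual AUR workflow
# for building and installing this package and starting the bundled service.
# The commands are illustrative; the exact build steps live in the PKGBUILD,
# which is not shown here. The service and conf file names come from the
# source and backup entries above.

git clone https://aur.archlinux.org/llama.cpp-vulkan.git
cd llama.cpp-vulkan
makepkg -si    # resolve makedepends, build, and install via pacman

# (Inside the PKGBUILD, the Vulkan backend is presumably enabled through a
# cmake option such as -DGGML_VULKAN=ON; this is an assumption, since the
# PKGBUILD is not part of this file.)

# Optionally enable the packaged systemd unit; its runtime arguments are
# typically read from /etc/conf.d/llama.cpp, the file marked as backup above.
sudo systemctl enable --now llama.cpp.service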
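
# Note (not part of the .SRCINFO): the five optdepends entries are only needed
# for model conversion, since upstream's convert_hf_to_gguf.py turns a
# Hugging Face checkpoint into a GGUF file the Vulkan build can load. A rough
# sketch of that workflow follows; paths are illustrative.

sudo pacman -S --asdeps python-numpy python-safetensors python-sentencepiece python-pytorch python-transformers

# Convert a local Hugging Face model directory to GGUF
# (--outtype f16 keeps 16-bit weights; quantized output types also exist).
python convert_hf_to_gguf.py ~/models/my-hf-model --outfile my-model.gguf --outtype f16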