pkgbase = llama.cpp-hip
	pkgdesc = Port of Facebook's LLaMA model in C/C++ (with AMD ROCm optimizations)
	pkgver = b6992
	pkgrel = 1
	url = https://github.com/ggml-org/llama.cpp
	arch = x86_64
	arch = armv7h
	arch = aarch64
	license = MIT
	makedepends = cmake
	makedepends = git
	makedepends = rocm-hip-sdk
	depends = curl
	depends = gcc-libs
	depends = glibc
	depends = hip-runtime-amd
	depends = hipblas
	depends = openmp
	depends = python
	depends = rocblas
	depends = rocwmma
	optdepends = python-numpy: needed for convert_hf_to_gguf.py
	optdepends = python-safetensors: needed for convert_hf_to_gguf.py
	optdepends = python-sentencepiece: needed for convert_hf_to_gguf.py
	optdepends = python-pytorch: needed for convert_hf_to_gguf.py
	optdepends = python-transformers: needed for convert_hf_to_gguf.py
	provides = llama.cpp
	conflicts = llama.cpp
	conflicts = libggml
	conflicts = ggml
	conflicts = stable-diffusion.cpp
	options = lto
	options = !debug
	source = llama.cpp-hip-b6992.tar.gz::https://github.com/ggml-org/llama.cpp/archive/refs/tags/b6992.tar.gz
	sha256sums = 65c4cf114c838d5849d91c7d00507561bccca0dace5d8d6f1dc841c9046a8483

pkgname = llama.cpp-hip