pkgbase = llama.cpp-hip
	pkgdesc = Port of Facebook's LLaMA model in C/C++ (with AMD ROCm optimizations)
	pkgver = b7947
	pkgrel = 1
	url = https://github.com/ggml-org/llama.cpp
	arch = x86_64
	arch = armv7h
	arch = aarch64
	license = MIT
	makedepends = cmake
	makedepends = git
	makedepends = rocm-hip-sdk
	depends = curl
	depends = gcc-libs
	depends = glibc
	depends = hip-runtime-amd
	depends = hipblas
	depends = openmp
	depends = python
	depends = rocblas
	depends = rocwmma
	optdepends = python-numpy: needed for convert_hf_to_gguf.py
	optdepends = python-safetensors: needed for convert_hf_to_gguf.py
	optdepends = python-sentencepiece: needed for convert_hf_to_gguf.py
	optdepends = python-pytorch: needed for convert_hf_to_gguf.py
	optdepends = python-transformers: needed for convert_hf_to_gguf.py
	optdepends = python-gguf: needed for convert_hf_to_gguf.py
	provides = llama.cpp
	conflicts = llama.cpp
	conflicts = libggml
	conflicts = ggml
	conflicts = stable-diffusion.cpp
	options = lto
	options = !debug
	backup = etc/conf.d/llama.cpp
	source = llama.cpp-hip-b7947.tar.gz::https://github.com/ggml-org/llama.cpp/archive/refs/tags/b7947.tar.gz
	source = https://raw.githubusercontent.com/Orion-zhen/aur-packages/refs/heads/main/assets/llama.cpp/llama.cpp.service
	source = https://raw.githubusercontent.com/Orion-zhen/aur-packages/refs/heads/main/assets/llama.cpp/llama.cpp.conf
	sha256sums = 0fc86047cae1323388e7b13f41087f278f99cb72c2caca08878421b87991fab2
	sha256sums = 0377d08a07bda056785981d3352ccd2dbc0387c4836f91fb73e6b790d836620d
	sha256sums = e4856f186f69cd5dbfcc4edec9f6b6bd08e923bceedd8622eeae1a2595beb2ec

pkgname = llama.cpp-hip