pkgbase = llama.cpp-hip
	pkgdesc = Port of Facebook's LLaMA model in C/C++ (with AMD ROCm optimizations)
	pkgver = b4855
	pkgrel = 1
	url = https://github.com/ggerganov/llama.cpp
	arch = x86_64
	arch = armv7h
	arch = aarch64
	license = MIT
	makedepends = clblast
	makedepends = cmake
	makedepends = git
	makedepends = rocm-hip-runtime
	makedepends = rocm-hip-sdk
	depends = blas-openblas
	depends = blas64-openblas
	depends = curl
	depends = gcc-libs
	depends = glibc
	depends = hip-runtime-amd
	depends = hipblas
	depends = openmp
	depends = python
	depends = python-numpy
	depends = python-sentencepiece
	depends = rocblas
	optdepends = python-pytorch
	provides = llama.cpp
	conflicts = llama.cpp
	options = lto
	source = git+https://github.com/ggerganov/llama.cpp#tag=b4855
	source = git+https://github.com/nomic-ai/kompute.git
	source = llama.cpp.conf
	source = llama.cpp.service
	sha256sums = 299681fb18190314b1c708d5cdb71cce1bb919fd2c97c56fcdfb44d2927d382b
	sha256sums = SKIP
	sha256sums = 53fa70cfe40cb8a3ca432590e4f76561df0f129a31b121c9b4b34af0da7c4d87
	sha256sums = 0377d08a07bda056785981d3352ccd2dbc0387c4836f91fb73e6b790d836620d

pkgname = llama.cpp-hip