pkgbase = ik-llama.cpp-vulkan
	pkgdesc = llama.cpp fork with additional SOTA quants and improved performance (Vulkan backend)
	pkgver = r3904.0ad1d340
	pkgrel = 1
	url = https://github.com/ikawrakow/ik_llama.cpp
	arch = x86_64
	arch = armv7h
	arch = aarch64
	license = MIT
	makedepends = cmake
	makedepends = git
	makedepends = shaderc
	makedepends = vulkan-headers
	depends = curl
	depends = gcc-libs
	depends = glibc
	depends = python
	depends = vulkan-icd-loader
	optdepends = python-numpy: needed for convert_hf_to_gguf.py
	optdepends = python-safetensors: needed for convert_hf_to_gguf.py
	optdepends = python-sentencepiece: needed for convert_hf_to_gguf.py
	optdepends = python-pytorch: needed for convert_hf_to_gguf.py
	optdepends = python-transformers: needed for convert_hf_to_gguf.py
	provides = llama.cpp
	conflicts = libggml
	conflicts = ggml
	conflicts = llama.cpp
	conflicts = llama.cpp-vulkan
	conflicts = llama.cpp-cuda
	conflicts = llama.cpp-hip
	conflicts = ik-llama.cpp
	conflicts = ik-llama.cpp-cuda
	options = lto
	options = !debug

pkgname = ik-llama.cpp-vulkan
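
For reference, the PKGBUILD that generates this .SRCINFO most likely derives pkgver from the git revision count and short commit hash (the standard r<count>.<hash> VCS convention, matching r3904.0ad1d340) and enables the Vulkan backend at configure time. A minimal sketch follows; the GGML_VULKAN switch is assumed from upstream llama.cpp's CMake options, and the actual PKGBUILD may name the source directory or the flag differently.

pkgver() {
	cd ik_llama.cpp
	# standard Arch VCS pkgver: revision count + short commit hash
	printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}

build() {
	# assumption: Vulkan backend toggled with GGML_VULKAN, as in upstream llama.cpp
	cmake -B build -S ik_llama.cpp \
		-DCMAKE_BUILD_TYPE=Release \
		-DCMAKE_INSTALL_PREFIX=/usr \
		-DGGML_VULKAN=ON
	cmake --build build
}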