From 8c8cbc25f7d532ddb366f2c4e37bb5f4c50fd5ce Mon Sep 17 00:00:00 2001 From: Orion Date: Fri, 1 Aug 2025 10:02:04 +0000 Subject: [PATCH] :bookmark: Bump ik-llama.cpp-vulkan version to r3826.ae0ba31f-1 --- .SRCINFO | 37 +++++++++++++++++++++++++ PKGBUILD | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 121 insertions(+) create mode 100644 .SRCINFO create mode 100644 PKGBUILD diff --git a/.SRCINFO b/.SRCINFO new file mode 100644 index 0000000000000..ecf3b00ccfe3e --- /dev/null +++ b/.SRCINFO @@ -0,0 +1,37 @@ +pkgbase = ik-llama.cpp-vulkan + pkgdesc = llama.cpp fork with additional SOTA quants and improved performance (Vulkan Backend) + pkgver = r3826.ae0ba31f + pkgrel = 1 + url = https://github.com/ikawrakow/ik_llama.cpp + arch = x86_64 + arch = armv7h + arch = aarch64 + license = MIT + makedepends = cmake + makedepends = git + makedepends = shaderc + makedepends = vulkan-headers + depends = curl + depends = gcc-libs + depends = glibc + depends = python + depends = vulkan-icd-loader + optdepends = python-numpy: needed for convert_hf_to_gguf.py + optdepends = python-safetensors: needed for convert_hf_to_gguf.py + optdepends = python-sentencepiece: needed for convert_hf_to_gguf.py + optdepends = python-pytorch: needed for convert_hf_to_gguf.py + optdepends = python-transformers: needed for convert_hf_to_gguf.py + provides = llama.cpp + conflicts = libggml + conflicts = ggml + conflicts = llama.cpp + conflicts = llama.cpp-vulkan + conflicts = llama.cpp-cuda + conflicts = llama.cpp-hip + conflicts = ik-llama.cpp + conflicts = ik-llama.cpp-cuda + conflicts = ik-llama.cpp-hip + options = lto + options = !debug + +pkgname = ik-llama.cpp-vulkan diff --git a/PKGBUILD b/PKGBUILD new file mode 100644 index 0000000000000..c4035d973cb6d --- /dev/null +++ b/PKGBUILD @@ -0,0 +1,84 @@ +# Maintainer: Orion-zhen + +pkgname=ik-llama.cpp-vulkan +_pkgname=ik_llama.cpp +pkgver=r3826.ae0ba31f +pkgrel=1 +pkgdesc="llama.cpp fork with additional SOTA quants and 
improved performance (Vulkan Backend)"
arch=(x86_64 armv7h aarch64)
url="https://github.com/ikawrakow/ik_llama.cpp"
license=("MIT")
depends=(
    curl
    gcc-libs
    glibc
    python
    vulkan-icd-loader
)
makedepends=(
    cmake
    git
    shaderc
    vulkan-headers
)
optdepends=(
    'python-numpy: needed for convert_hf_to_gguf.py'
    'python-safetensors: needed for convert_hf_to_gguf.py'
    'python-sentencepiece: needed for convert_hf_to_gguf.py'
    'python-pytorch: needed for convert_hf_to_gguf.py'
    'python-transformers: needed for convert_hf_to_gguf.py'
)
conflicts=(
    libggml
    ggml
    llama.cpp
    llama.cpp-vulkan
    llama.cpp-cuda
    llama.cpp-hip
    ik-llama.cpp
    ik-llama.cpp-cuda
    ik-llama.cpp-hip
)
provides=(llama.cpp)

options=(lto !debug)

# Intentionally empty: the repository is fetched in prepare() instead of via
# a source=() entry, so makepkg has nothing to download or checksum here.
source=()
sha256sums=()

prepare() {
    cd "$srcdir" || exit 1
    # Make repeated makepkg runs idempotent: `git clone` aborts if the target
    # directory already exists, so update an existing checkout instead of
    # cloning over it.
    if [[ -d "${_pkgname}/.git" ]]; then
        git -C "${_pkgname}" pull --ff-only
    else
        git clone --single-branch --branch main "${url}" "${_pkgname}"
    fi
}

pkgver() {
    # Version format rNNNN.<shorthash>: monotonically increasing commit count
    # plus the abbreviated HEAD id. Requires full history, so the clone in
    # prepare() must not be shallow.
    cd "$_pkgname" || exit 1
    printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}

build() {
    local _cmake_options=(
        -B build
        -S "${_pkgname}"
        -DCMAKE_INSTALL_PREFIX='/usr'
        -DBUILD_SHARED_LIBS=ON
        -DLLAMA_CURL=ON
        -DLLAMA_BUILD_TESTS=OFF
        -DLLAMA_USE_SYSTEM_GGML=OFF
        -DGGML_ALL_WARNINGS=OFF
        -DGGML_ALL_WARNINGS_3RD_PARTY=OFF
        -DGGML_BUILD_EXAMPLES=OFF
        -DGGML_BUILD_TESTS=OFF
        -DGGML_VULKAN=ON
        -DGGML_LTO=ON
        -DGGML_RPC=ON
        # NOTE(review): native CPU tuning is fine for a locally built AUR
        # package, but the resulting binary is not portable to other machines.
        -DGGML_NATIVE=ON
        -Wno-dev
    )
    cmake "${_cmake_options[@]}"
    cmake --build build --config Release
}

package() {
    DESTDIR="${pkgdir}" cmake --install build
    # MIT requires shipping the license text: Arch packaging policy places it
    # under /usr/share/licenses/$pkgname for licenses not provided by the
    # system-wide `licenses` package.
    install -Dm644 "${srcdir}/${_pkgname}/LICENSE" \
        "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}