commit cb41acb6053a5b6bfd4e54f88f00eadf1bb6255c
Author: Orion <128988082+Orion-zhen@users.noreply.github.com>
Date:   Thu Jul 31 09:40:53 2025 +0800

    ik-llama.cpp-cuda

diff --git a/.SRCINFO b/.SRCINFO
new file mode 100644
index 0000000000000..3583da1267091
--- /dev/null
+++ b/.SRCINFO
@@ -0,0 +1,31 @@
+pkgbase = ik-llama.cpp-cuda
+	pkgdesc = llama.cpp fork with additional SOTA quants and improved performance (CUDA Backend)
+	pkgver = 1.0
+	pkgrel = 1
+	url = https://github.com/ikawrakow/ik_llama.cpp
+	arch = x86_64
+	arch = armv7h
+	arch = aarch64
+	license = MIT
+	makedepends = cmake
+	makedepends = git
+	depends = cuda
+	depends = nvidia-utils
+	depends = curl
+	depends = gcc-libs
+	depends = glibc
+	depends = python
+	provides = llama.cpp
+	conflicts = libggml
+	conflicts = ggml
+	conflicts = llama.cpp
+	conflicts = llama.cpp-vulkan
+	conflicts = llama.cpp-cuda
+	conflicts = llama.cpp-hip
+	conflicts = ik-llama.cpp
+	options = lto
+	options = !debug
+	source = git+https://github.com/ikawrakow/ik_llama.cpp
+	sha256sums = SKIP
+
+pkgname = ik-llama.cpp-cuda
diff --git a/PKGBUILD b/PKGBUILD
new file mode 100644
index 0000000000000..e29d5e6a77535
--- /dev/null
+++ b/PKGBUILD
@@ -0,0 +1,73 @@
+# Maintainer: Orion-zhen
+
+pkgname=ik-llama.cpp-cuda
+_pkgname=ik_llama.cpp
+pkgver=1.0
+pkgrel=1
+pkgdesc="llama.cpp fork with additional SOTA quants and improved performance (CUDA Backend)"
+arch=(x86_64 armv7h aarch64)
+url="https://github.com/ikawrakow/ik_llama.cpp"
+license=("MIT")
+depends=(
+  cuda
+  nvidia-utils
+  curl
+  gcc-libs
+  glibc
+  python
+)
+makedepends=(
+  cmake
+  git
+)
+conflicts=(
+  libggml
+  ggml
+  llama.cpp
+  llama.cpp-vulkan
+  llama.cpp-cuda
+  llama.cpp-hip
+  ik-llama.cpp
+)
+provides=(llama.cpp)
+
+options=(lto !debug)
+
+source=("git+${url}")
+sha256sums=("SKIP")
+
+pkgver() {
+  cd "$_pkgname"
+  printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
+}
+
+build() {
+  local _cmake_options=(
+    -B build
+    -S "${_pkgname}"
+    # Single-config generators (Makefiles/Ninja) ignore `--config Release`,
+    # so the build type must be set at configure time.
+    -DCMAKE_BUILD_TYPE=Release
+    -DCMAKE_INSTALL_PREFIX='/usr'
+    -DBUILD_SHARED_LIBS=ON
+    -DLLAMA_CURL=ON
+    -DLLAMA_BUILD_TESTS=OFF
+    -DLLAMA_USE_SYSTEM_GGML=OFF
+    -DGGML_ALL_WARNINGS=OFF
+    -DGGML_ALL_WARNINGS_3RD_PARTY=OFF
+    -DGGML_BUILD_EXAMPLES=OFF
+    -DGGML_BUILD_TESTS=OFF
+    -DGGML_CUDA=ON
+    -DGGML_LTO=ON
+    -DGGML_RPC=ON
+    -DGGML_NATIVE=ON
+    -Wno-dev
+  )
+  cmake "${_cmake_options[@]}"
+  cmake --build build --config Release
+}
+
+package() {
+  DESTDIR="${pkgdir}" cmake --install build
+  # MIT is not a "common" license on Arch, so the license text must be shipped.
+  install -Dm644 "${_pkgname}/LICENSE" "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
+}