Mirror of https://github.com/archlinux/aur.git, synced 2026-03-14 23:16:48 +01:00
🔖 Bump ik-llama.cpp-vulkan version to r3826.ae0ba31f-1
This commit is contained in: commit 8c8cbc25f7
2 changed files with 121 additions and 0 deletions
.SRCINFO (Normal file, 37 lines)
@@ -0,0 +1,37 @@
pkgbase = ik-llama.cpp-vulkan
	pkgdesc = llama.cpp fork with additional SOTA quants and improved performance (Vulkan Backend)
	pkgver = r3826.ae0ba31f
	pkgrel = 1
	url = https://github.com/ikawrakow/ik_llama.cpp
	arch = x86_64
	arch = armv7h
	arch = aarch64
	license = MIT
	makedepends = cmake
	makedepends = git
	makedepends = shaderc
	makedepends = vulkan-headers
	depends = curl
	depends = gcc-libs
	depends = glibc
	depends = python
	depends = vulkan-icd-loader
	optdepends = python-numpy: needed for convert_hf_to_gguf.py
	optdepends = python-safetensors: needed for convert_hf_to_gguf.py
	optdepends = python-sentencepiece: needed for convert_hf_to_gguf.py
	optdepends = python-pytorch: needed for convert_hf_to_gguf.py
	optdepends = python-transformers: needed for convert_hf_to_gguf.py
	provides = llama.cpp
	conflicts = libggml
	conflicts = ggml
	conflicts = llama.cpp
	conflicts = llama.cpp-vulkan
	conflicts = llama.cpp-cuda
	conflicts = llama.cpp-hip
	conflicts = ik-llama.cpp
	conflicts = ik-llama.cpp-cuda
	conflicts = ik-llama.cpp-hip
	options = lto
	options = !debug

pkgname = ik-llama.cpp-vulkan
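Note (not part of this commit): the .SRCINFO above is the metadata file the AUR reads; it is normally regenerated from the PKGBUILD rather than edited by hand, typically with:

    makepkg --printsrcinfo > .SRCINFO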
PKGBUILD (Normal file, 84 lines)
@@ -0,0 +1,84 @@
# Maintainer: Orion-zhen <https://github.com/Orion-zhen>

pkgname=ik-llama.cpp-vulkan
_pkgname=ik_llama.cpp
pkgver=r3826.ae0ba31f
pkgrel=1
pkgdesc="llama.cpp fork with additional SOTA quants and improved performance (Vulkan Backend)"
arch=(x86_64 armv7h aarch64)
url="https://github.com/ikawrakow/ik_llama.cpp"
license=("MIT")
depends=(
  curl
  gcc-libs
  glibc
  python
  vulkan-icd-loader
)
makedepends=(
  cmake
  git
  shaderc
  vulkan-headers
)
optdepends=(
  'python-numpy: needed for convert_hf_to_gguf.py'
  'python-safetensors: needed for convert_hf_to_gguf.py'
  'python-sentencepiece: needed for convert_hf_to_gguf.py'
  'python-pytorch: needed for convert_hf_to_gguf.py'
  'python-transformers: needed for convert_hf_to_gguf.py'
)
conflicts=(
  libggml
  ggml
  llama.cpp
  llama.cpp-vulkan
  llama.cpp-cuda
  llama.cpp-hip
  ik-llama.cpp
  ik-llama.cpp-cuda
  ik-llama.cpp-hip
)
provides=(llama.cpp)

options=(lto !debug)

source=()
sha256sums=()

prepare() {
  cd "$srcdir"
  git clone --single-branch --branch main "${url}" "${_pkgname}"
}

pkgver() {
  cd "$_pkgname"
  printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}

build() {
  local _cmake_options=(
    -B build
    -S "${_pkgname}"
    -DCMAKE_INSTALL_PREFIX='/usr'
    -DBUILD_SHARED_LIBS=ON
    -DLLAMA_CURL=ON
    -DLLAMA_BUILD_TESTS=OFF
    -DLLAMA_USE_SYSTEM_GGML=OFF
    -DGGML_ALL_WARNINGS=OFF
    -DGGML_ALL_WARNINGS_3RD_PARTY=OFF
    -DGGML_BUILD_EXAMPLES=OFF
    -DGGML_BUILD_TESTS=OFF
    -DGGML_VULKAN=ON
    -DGGML_LTO=ON
    -DGGML_RPC=ON
    -DGGML_NATIVE=ON
    -Wno-dev
  )
  cmake "${_cmake_options[@]}"
  cmake --build build --config Release
}

package() {
  DESTDIR="${pkgdir}" cmake --install build
}
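A minimal sketch of building and installing this package from the AUR, assuming it is published there under the same name and that base-devel and a working Vulkan driver are installed (the clone URL below is the standard AUR pattern, not taken from this commit):

    # fetch the package repository, then build and install it
    git clone https://aur.archlinux.org/ik-llama.cpp-vulkan.git
    cd ik-llama.cpp-vulkan
    makepkg -si

Since source=() is empty and prepare() clones the upstream main branch at build time, pkgver() recomputes the version as r<commit count>.<short hash> on every build, which is the usual convention for VCS-style Arch packages.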