From f09097f74cdff5d793b4984a42a96aecf6cc443f Mon Sep 17 00:00:00 2001
From: txtsd
Date: Sun, 15 Jun 2025 17:30:57 +0530
Subject: [PATCH] upgpkg: llama.cpp-vulkan b5669-2

Signed-off-by: txtsd
---
 .SRCINFO | 13 +++----------
 PKGBUILD | 31 +++++--------------------------
 2 files changed, 8 insertions(+), 36 deletions(-)

diff --git a/.SRCINFO b/.SRCINFO
index 0f99a99c22e4f..fcbd9f635f34b 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,7 +1,7 @@
 pkgbase = llama.cpp-vulkan
 	pkgdesc = Port of Facebook's LLaMA model in C/C++ (with Vulkan GPU optimizations)
 	pkgver = b5669
-	pkgrel = 1
+	pkgrel = 2
 	url = https://github.com/ggerganov/llama.cpp
 	arch = x86_64
 	arch = armv7h
@@ -9,29 +9,22 @@ pkgbase = llama.cpp-vulkan
 	license = MIT
 	makedepends = cmake
 	makedepends = git
-	makedepends = shaderc
-	makedepends = vulkan-headers
-	makedepends = pkgconf
-	depends = blas-openblas
-	depends = blas64-openblas
 	depends = curl
 	depends = gcc-libs
 	depends = glibc
-	depends = openmp
+	depends = libggml-vulkan
 	depends = python
 	depends = python-numpy
 	depends = python-sentencepiece
-	depends = vulkan-icd-loader
 	optdepends = python-pytorch
 	provides = llama.cpp
 	conflicts = llama.cpp
 	options = lto
+	options = !debug
 	source = git+https://github.com/ggerganov/llama.cpp#tag=b5669
-	source = git+https://github.com/nomic-ai/kompute.git
 	source = llama.cpp.conf
 	source = llama.cpp.service
 	sha256sums = eb46ae341d2f89395c4c86186c25be504a9e4db3369e1a826ccb3bc50743d21c
-	sha256sums = SKIP
 	sha256sums = 53fa70cfe40cb8a3ca432590e4f76561df0f129a31b121c9b4b34af0da7c4d87
 	sha256sums = 0377d08a07bda056785981d3352ccd2dbc0387c4836f91fb73e6b790d836620d
 
diff --git a/PKGBUILD b/PKGBUILD
index 1a15f0afd3521..7add3d381f549 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -3,67 +3,47 @@
 pkgname=llama.cpp-vulkan
 _pkgname=${pkgname%%-vulkan}
 pkgver=b5669
-pkgrel=1
+pkgrel=2
 pkgdesc="Port of Facebook's LLaMA model in C/C++ (with Vulkan GPU optimizations)"
 arch=(x86_64 armv7h aarch64)
 url='https://github.com/ggerganov/llama.cpp'
 license=('MIT')
 depends=(
-  blas-openblas
-  blas64-openblas
   curl
   gcc-libs
   glibc
-  openmp
+  libggml-vulkan
   python
   python-numpy
   python-sentencepiece
-  vulkan-icd-loader
 )
 makedepends=(
   cmake
   git
-  shaderc
-  vulkan-headers
-  pkgconf
 )
 optdepends=(python-pytorch)
 provides=(${_pkgname})
 conflicts=(${_pkgname})
-options+=(lto)
+options=(lto !debug)
 source=(
   "git+${url}#tag=${pkgver}"
-  "git+https://github.com/nomic-ai/kompute.git"
   llama.cpp.conf
   llama.cpp.service
 )
 sha256sums=('eb46ae341d2f89395c4c86186c25be504a9e4db3369e1a826ccb3bc50743d21c'
-            'SKIP'
             '53fa70cfe40cb8a3ca432590e4f76561df0f129a31b121c9b4b34af0da7c4d87'
             '0377d08a07bda056785981d3352ccd2dbc0387c4836f91fb73e6b790d836620d')
 
-prepare() {
-  cd "${_pkgname}"
-
-  git submodule init
-  git config submodule.kompute.url "${srcdir}/kompute"
-  git -c protocol.file.allow=always submodule update
-}
-
 build() {
   local _cmake_options=(
     -B build
     -S "${_pkgname}"
     -DCMAKE_BUILD_TYPE=None
     -DCMAKE_INSTALL_PREFIX='/usr'
-    -DGGML_ALL_WARNINGS=OFF
-    -DGGML_ALL_WARNINGS_3RD_PARTY=OFF
     -DBUILD_SHARED_LIBS=ON
-    -DGGML_LTO=ON
-    -DGGML_RPC=ON
     -DLLAMA_CURL=ON
-    -DGGML_BLAS=ON
-    -DGGML_VULKAN=ON
+    -DLLAMA_BUILD_TESTS=OFF
+    -DLLAMA_USE_SYSTEM_GGML=ON
     -Wno-dev
   )
   cmake "${_cmake_options[@]}"
@@ -72,7 +52,6 @@ build() {
 
 package() {
   DESTDIR="${pkgdir}" cmake --install build
-  rm "${pkgdir}/usr/include/"ggml*
 
   install -Dm644 "${_pkgname}/LICENSE" "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
 }