From 5dc54a54372bca990f04c4c52192529162ddaab0 Mon Sep 17 00:00:00 2001
From: txtsd
Date: Sat, 9 Nov 2024 11:58:53 +0530
Subject: [PATCH] upgpkg: llama.cpp-vulkan b4053-1

Upstream Release

Signed-off-by: txtsd
---
 .SRCINFO |  6 ++--
 PKGBUILD | 86 +++++++++++++++++++++++++++++++-------------------------
 2 files changed, 50 insertions(+), 42 deletions(-)

diff --git a/.SRCINFO b/.SRCINFO
index 245da7e4c043d..f38cb7afed193 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,6 +1,6 @@
 pkgbase = llama.cpp-vulkan
 	pkgdesc = Port of Facebook's LLaMA model in C/C++ (with Vulkan GPU optimizations)
-	pkgver = b4033
+	pkgver = b4053
 	pkgrel = 1
 	url = https://github.com/ggerganov/llama.cpp
 	arch = x86_64
@@ -23,11 +23,11 @@ pkgbase = llama.cpp-vulkan
 	depends = python-sentencepiece
 	depends = vulkan-icd-loader
 	options = lto
-	source = git+https://github.com/ggerganov/llama.cpp#tag=b4033
+	source = git+https://github.com/ggerganov/llama.cpp#tag=b4053
 	source = git+https://github.com/nomic-ai/kompute.git
 	source = llama.cpp.conf
 	source = llama.cpp.service
-	sha256sums = 3299f2b01218723720e42f9bd035d14ec06eb1df86d1e77ce3ff1b9f2c96bc40
+	sha256sums = 2e700ceb1142b07c1647d090027a5a5cbd230b316c88a78c7a47afe525ab3033
 	sha256sums = SKIP
 	sha256sums = 53fa70cfe40cb8a3ca432590e4f76561df0f129a31b121c9b4b34af0da7c4d87
 	sha256sums = 065f69ccd7ac40d189fae723b58d6de2a24966e9b526e0dbfa3035a4c46a7669

diff --git a/PKGBUILD b/PKGBUILD
index 7aa554d25e87b..e2ff41171a00a 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -2,39 +2,39 @@
 
 pkgname=llama.cpp-vulkan
 _pkgname=${pkgname%%-vulkan}
-pkgver=b4033
+pkgver=b4053
 pkgrel=1
 pkgdesc="Port of Facebook's LLaMA model in C/C++ (with Vulkan GPU optimizations)"
-arch=('x86_64' 'armv7h' 'aarch64')
+arch=(x86_64 armv7h aarch64)
 url='https://github.com/ggerganov/llama.cpp'
 license=('MIT')
 depends=(
-  'blas-openblas'
-  'blas64-openblas'
-  'curl'
-  'gcc-libs'
-  'glibc'
-  'openmp'
-  'python'
-  'python-numpy'
-  'python-sentencepiece'
-  'vulkan-icd-loader'
+  blas-openblas
+  blas64-openblas
+  curl
+  gcc-libs
+  glibc
+  openmp
+  python
+  python-numpy
+  python-sentencepiece
+  vulkan-icd-loader
 )
 makedepends=(
-  'cmake'
-  'git'
-  'shaderc'
-  'vulkan-headers'
-  'pkgconf'
+  cmake
+  git
+  shaderc
+  vulkan-headers
+  pkgconf
 )
 options=(lto)
 source=(
   "git+${url}#tag=${pkgver}"
   "git+https://github.com/nomic-ai/kompute.git"
-  'llama.cpp.conf'
-  'llama.cpp.service'
+  llama.cpp.conf
+  llama.cpp.service
 )
-sha256sums=('3299f2b01218723720e42f9bd035d14ec06eb1df86d1e77ce3ff1b9f2c96bc40'
+sha256sums=('2e700ceb1142b07c1647d090027a5a5cbd230b316c88a78c7a47afe525ab3033'
             'SKIP'
             '53fa70cfe40cb8a3ca432590e4f76561df0f129a31b121c9b4b34af0da7c4d87'
             '065f69ccd7ac40d189fae723b58d6de2a24966e9b526e0dbfa3035a4c46a7669')
@@ -48,29 +48,37 @@ prepare() {
 }
 
 build() {
-  cd "${_pkgname}"
-  cmake -S . \
-    -B build \
-    -DCMAKE_INSTALL_PREFIX=/usr \
-    -DBUILD_SHARED_LIBS=ON \
-    -DGGML_ALL_WARNINGS_3RD_PARTY=ON \
-    -DGGML_BLAS=ON \
-    -DGGML_LTO=ON \
-    -DGGML_RPC=ON \
-    -DLLAMA_CURL=ON \
-    -DLLAMA_FATAL_WARNINGS=OFF \
-    -DGGML_VULKAN=ON \
-    -DGGML_VULKAN_CHECK_RESULTS=ON \
-    -DGGML_VULKAN_RUN_TESTS=ON \
+  local _cmake_options=(
+    -B build
+    -S "${_pkgname}"
+    -DCMAKE_BUILD_TYPE=None
+    -DCMAKE_INSTALL_PREFIX='/usr'
+    -DGGML_NATIVE=OFF
+    -DGGML_AVX2=OFF
+    -DGGML_AVX=OFF
+    -DGGML_F16C=OFF
+    -DGGML_FMA=OFF
+    -DGGML_ALL_WARNINGS=OFF
+    -DGGML_ALL_WARNINGS_3RD_PARTY=OFF
+    -DGGML_LTO=ON
+    -DGGML_RPC=ON
+    -DLLAMA_CURL=ON
+    -DGGML_BLAS=ON
+    -DGGML_VULKAN=ON
+    -DGGML_VULKAN_CHECK_RESULTS=ON
+    -DGGML_VULKAN_RUN_TESTS=ON
     -DGGML_VULKAN_VALIDATE=ON
+    -Wno-dev
+  )
+  cmake "${_cmake_options[@]}"
+  cmake --build build
 }
 
 package() {
-  cd "${_pkgname}"
   DESTDIR="${pkgdir}" cmake --install build
-  install -Dm644 'LICENSE' "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
-  install -Dm644 "${srcdir}/llama.cpp.conf" "${pkgdir}/etc/conf.d/llama.cpp"
-  install -Dm644 "${srcdir}/llama.cpp.service" "${pkgdir}/usr/lib/systemd/system/llama.cpp.service"
+  install -Dm644 "${_pkgname}/LICENSE" "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
+
+  install -Dm644 "llama.cpp.conf" "${pkgdir}/etc/conf.d/llama.cpp"
+  install -Dm644 "llama.cpp.service" "${pkgdir}/usr/lib/systemd/system/llama.cpp.service"
 }
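
Quick local check after applying this patch — a minimal sketch, assuming standard Arch packaging tooling (makepkg) run from the directory containing the updated PKGBUILD:

    # Re-download the b4053 tag and verify it against the updated sha256sums
    makepkg --verifysource

    # Build and install the package, pulling in any missing makedepends
    makepkg --syncdeps --install

    # If the PKGBUILD is edited further, regenerate .SRCINFO to match
    makepkg --printsrcinfo > .SRCINFO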