From 7efe82fb4308dd76fc489d910e4a2c49be4f3e04 Mon Sep 17 00:00:00 2001 From: Orion Date: Fri, 30 Jan 2026 00:27:11 +0000 Subject: [PATCH] :bookmark: Bump llama.cpp-vulkan version to b7876-1 --- .SRCINFO | 6 +++--- PKGBUILD | 5 +++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.SRCINFO b/.SRCINFO index 98bbff2698ece..7f09428973fd5 100644 --- a/.SRCINFO +++ b/.SRCINFO @@ -1,6 +1,6 @@ pkgbase = llama.cpp-vulkan pkgdesc = Port of Facebook's LLaMA model in C/C++ (with Vulkan GPU optimizations) - pkgver = b7865 + pkgver = b7876 pkgrel = 1 url = https://github.com/ggml-org/llama.cpp arch = x86_64 @@ -30,10 +30,10 @@ pkgbase = llama.cpp-vulkan options = lto options = !debug backup = etc/conf.d/llama.cpp - source = llama.cpp-vulkan-b7865.tar.gz::https://github.com/ggml-org/llama.cpp/archive/refs/tags/b7865.tar.gz + source = llama.cpp-vulkan-b7876.tar.gz::https://github.com/ggml-org/llama.cpp/archive/refs/tags/b7876.tar.gz source = https://raw.githubusercontent.com/Orion-zhen/aur-packages/refs/heads/main/assets/llama.cpp/llama.cpp.service source = https://raw.githubusercontent.com/Orion-zhen/aur-packages/refs/heads/main/assets/llama.cpp/llama.cpp.conf - sha256sums = 4a05a48fc364ba0bac8b9be87d7c9c4f9ebb8aa13c2233e839c49bd5ae3dc3fa + sha256sums = f143c15d7fc57fb4783f0abd5b4e407cac01d2c8db7267454291935592f95682 sha256sums = 0377d08a07bda056785981d3352ccd2dbc0387c4836f91fb73e6b790d836620d sha256sums = e4856f186f69cd5dbfcc4edec9f6b6bd08e923bceedd8622eeae1a2595beb2ec diff --git a/PKGBUILD b/PKGBUILD index 21807ca3fc260..07622c1d8d80e 100644 --- a/PKGBUILD +++ b/PKGBUILD @@ -3,7 +3,7 @@ pkgname=llama.cpp-vulkan _pkgname=${pkgname%%-vulkan} -pkgver=b7865 +pkgver=b7876 pkgrel=1 pkgdesc="Port of Facebook's LLaMA model in C/C++ (with Vulkan GPU optimizations)" arch=(x86_64 armv7h aarch64) @@ -39,7 +39,7 @@ source=( "https://raw.githubusercontent.com/Orion-zhen/aur-packages/refs/heads/main/assets/llama.cpp/llama.cpp.service" 
"https://raw.githubusercontent.com/Orion-zhen/aur-packages/refs/heads/main/assets/llama.cpp/llama.cpp.conf" ) -sha256sums=('4a05a48fc364ba0bac8b9be87d7c9c4f9ebb8aa13c2233e839c49bd5ae3dc3fa' +sha256sums=('f143c15d7fc57fb4783f0abd5b4e407cac01d2c8db7267454291935592f95682' '0377d08a07bda056785981d3352ccd2dbc0387c4836f91fb73e6b790d836620d' 'e4856f186f69cd5dbfcc4edec9f6b6bd08e923bceedd8622eeae1a2595beb2ec') @@ -64,6 +64,7 @@ build() { -DGGML_RPC=ON -DGGML_VULKAN=ON -DGGML_CUDA_FA_ALL_QUANTS=ON + -DLLAMA_BUILD_NUMBER="${pkgver#b}" # fix the reported build/version number -Wno-dev )