From 32f7e6ca56f6ea544441e470124662c8cc436a8c Mon Sep 17 00:00:00 2001
From: txtsd
Date: Sun, 27 Oct 2024 01:24:03 +0530
Subject: [PATCH] upgpkg: llama.cpp-hip b3982-1

Signed-off-by: txtsd
---
 .SRCINFO          | 37 +++++++++++++++++++++++
 .gitignore        |  6 ++++
 .nvchecker.toml   |  4 +++
 PKGBUILD          | 77 +++++++++++++++++++++++++++++++++++++++++++++++
 llama.cpp.conf    |  1 +
 llama.cpp.service | 13 ++++++++
 6 files changed, 138 insertions(+)
 create mode 100644 .SRCINFO
 create mode 100644 .gitignore
 create mode 100644 .nvchecker.toml
 create mode 100644 PKGBUILD
 create mode 100644 llama.cpp.conf
 create mode 100644 llama.cpp.service

diff --git a/.SRCINFO b/.SRCINFO
new file mode 100644
index 0000000000000..5dcdd283f5166
--- /dev/null
+++ b/.SRCINFO
@@ -0,0 +1,37 @@
+pkgbase = llama.cpp-hip
+	pkgdesc = Port of Facebook's LLaMA model in C/C++ (with AMD ROCm optimizations)
+	pkgver = b3982
+	pkgrel = 1
+	url = https://github.com/ggerganov/llama.cpp
+	arch = x86_64
+	arch = armv7h
+	arch = aarch64
+	license = MIT
+	makedepends = rocm-hip-runtime
+	makedepends = rocm-hip-sdk
+	makedepends = clblast
+	makedepends = cmake
+	makedepends = git
+	depends = curl
+	depends = gcc-libs
+	depends = glibc
+	depends = python
+	depends = python-numpy
+	depends = python-sentencepiece
+	depends = hipblas
+	depends = hip-runtime-amd
+	depends = openmp
+	depends = rocblas
+	provides = llama.cpp
+	conflicts = llama.cpp
+	options = lto
+	source = git+https://github.com/ggerganov/llama.cpp#tag=b3982
+	source = git+https://github.com/nomic-ai/kompute.git
+	source = llama.cpp.conf
+	source = llama.cpp.service
+	sha256sums = 24615514be685a63a7d5ce3d7823e27abc09f67fef97bd3c021d1342d1cd4942
+	sha256sums = SKIP
+	sha256sums = 53fa70cfe40cb8a3ca432590e4f76561df0f129a31b121c9b4b34af0da7c4d87
+	sha256sums = SKIP
+
+pkgname = llama.cpp-hip
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000..5cdbded2a16af
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+pkg/
+src/
+llama.cpp/
+kompute/
+
+*.tar.*
diff --git a/.nvchecker.toml b/.nvchecker.toml
new file mode 100644
index 0000000000000..93c61f0408c8a
--- /dev/null
+++ b/.nvchecker.toml
@@ -0,0 +1,4 @@
+["llama.cpp-hip"]
+source = "git"
+git = "https://github.com/ggerganov/llama.cpp.git"
+include_regex = "b\\d+"
diff --git a/PKGBUILD b/PKGBUILD
new file mode 100644
index 0000000000000..a5a44664fcc85
--- /dev/null
+++ b/PKGBUILD
@@ -0,0 +1,77 @@
+# Maintainer: txtsd
+
+pkgname=llama.cpp-hip
+_pkgname=${pkgname%%-hip}
+pkgver=b3982
+pkgrel=1
+pkgdesc="Port of Facebook's LLaMA model in C/C++ (with AMD ROCm optimizations)"
+arch=('x86_64' 'armv7h' 'aarch64')
+url='https://github.com/ggerganov/llama.cpp'
+license=('MIT')
+depends=(
+  'curl'
+  'gcc-libs'
+  'glibc'
+  'python'
+  'python-numpy'
+  'python-sentencepiece'
+  'hipblas'
+  'hip-runtime-amd'
+  'openmp'
+  'rocblas'
+)
+makedepends=(
+  'rocm-hip-runtime'
+  'rocm-hip-sdk'
+  'clblast'
+  'cmake'
+  'git'
+)
+provides=(${_pkgname})
+conflicts=(${_pkgname})
+options=(lto)
+source=(
+  "git+${url}#tag=${pkgver}"
+  "git+https://github.com/nomic-ai/kompute.git"
+  'llama.cpp.conf'
+  'llama.cpp.service'
+)
+sha256sums=('24615514be685a63a7d5ce3d7823e27abc09f67fef97bd3c021d1342d1cd4942'
+            'SKIP'
+            '53fa70cfe40cb8a3ca432590e4f76561df0f129a31b121c9b4b34af0da7c4d87'
+            'SKIP')
+
+prepare() {
+  cd "${_pkgname}"
+
+  git submodule init
+  git config submodule.kompute.url "${srcdir}/kompute"
+  git -c protocol.file.allow=always submodule update
+}
+
+build() {
+  cd "${_pkgname}"
+  export CC=/opt/rocm/llvm/bin/clang
+  export CXX=/opt/rocm/llvm/bin/clang++
+  cmake -S . -B build \
+    -DCMAKE_INSTALL_PREFIX=/usr \
+    -DBUILD_SHARED_LIBS=ON \
+    -DGGML_ALL_WARNINGS_3RD_PARTY=ON \
+    -DGGML_BLAS=ON \
+    -DGGML_LTO=ON \
+    -DGGML_RPC=ON \
+    -DLLAMA_CURL=ON \
+    -DLLAMA_FATAL_WARNINGS=ON \
+    -DGGML_HIPBLAS=ON
+  cmake --build build --config Release
+}
+
+package() {
+  cd "${_pkgname}"
+
+  DESTDIR="${pkgdir}" cmake --install build
+  install -Dm644 'LICENSE' "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
+
+  install -Dm644 "${srcdir}/llama.cpp.conf" "${pkgdir}/etc/conf.d/llama.cpp"
+  install -Dm644 "${srcdir}/llama.cpp.service" "${pkgdir}/usr/lib/systemd/system/llama.cpp.service"
+}
diff --git a/llama.cpp.conf b/llama.cpp.conf
new file mode 100644
index 0000000000000..eab1f2be8d5cf
--- /dev/null
+++ b/llama.cpp.conf
@@ -0,0 +1 @@
+LLAMA_ARGS=""
diff --git a/llama.cpp.service b/llama.cpp.service
new file mode 100644
index 0000000000000..4678d85bd1ef4
--- /dev/null
+++ b/llama.cpp.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=llama.cpp Server
+After=network.target local-fs.target remote-fs.target nss-lookup.target
+
+[Service]
+Type=simple
+EnvironmentFile=/etc/conf.d/llama.cpp
+ExecStart=/usr/bin/llama-server $LLAMA_ARGS
+ExecReload=/bin/kill -s HUP $MAINPID
+Restart=no
+
+[Install]
+WantedBy=multi-user.target