author    Yuri Victorovich <yuri@FreeBSD.org>  2025-02-06 15:00:18 +0000
committer Yuri Victorovich <yuri@FreeBSD.org>  2025-02-06 15:05:43 +0000
commit    728f068f9ad61831e01c4bb5474bf93be42047e9 (patch)
tree      ba6123ae45baa290aecd06c106dfb4191926c7be
parent    daa995c601e31435d1c8ed110cbb0653d9012570 (diff)
misc/llama-cpp: update 4576 → 4649
 misc/llama-cpp/Makefile  | 13 +++++++++++--
 misc/llama-cpp/distinfo  |  6 +++---
 misc/llama-cpp/pkg-plist |  1 +
 3 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
index e0e3b47749c8..cf207ec7a275 100644
--- a/misc/llama-cpp/Makefile
+++ b/misc/llama-cpp/Makefile
@@ -1,6 +1,6 @@
 PORTNAME= llama-cpp
 DISTVERSIONPREFIX= b
-DISTVERSION= 4576
+DISTVERSION= 4649
 CATEGORIES= misc # machine-learning
 
 MAINTAINER= yuri@FreeBSD.org
@@ -44,7 +44,8 @@ VULKAN_BUILD_DEPENDS= glslc:graphics/shaderc \
 vulkan-headers>0:graphics/vulkan-headers
 VULKAN_LIB_DEPENDS= libvulkan.so:graphics/vulkan-loader
 
-BINARY_ALIAS= git=false
+BINARY_ALIAS= git=false \
+ python=${PYTHON_CMD} # for tests
 
 do-test-ci: # build of tests fails, see https://github.com/ggerganov/llama.cpp/issues/10955
 @cd ${WRKSRC} && \
@@ -52,4 +53,12 @@ do-test-ci: # build of tests fails, see https://github.com/ggerganov/llama.cpp/issues/10955
 # tests as of 4458: 97% tests passed, 1 tests failed out of 31, see https://github.com/ggerganov/llama.cpp/issues/11036
+# tests as of 4649:
+# 88% tests passed, 4 tests failed out of 32
+# The following tests FAILED:
+# 18 - test-chat (Subprocess aborted) main # see https://github.com/ggerganov/llama.cpp/issues/11705
+# 24 - test-gguf (SEGFAULT) main
+# 25 - test-backend-ops (SEGFAULT) main
+# 32 - test-eval-callback (SEGFAULT) curl eval-callback
+
 .include <bsd.port.mk>
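
Aside on the new BINARY_ALIAS entry: in the ports framework each name=target pair is materialized as a symlink in a private directory that is prepended to PATH during the build and test phases, so the upstream test scripts find a plain "python" even though FreeBSD installs only versioned python3.x binaries. A rough shell sketch of the effect (the .bin path is illustrative; the real location is managed by the framework):

    # Sketch only: approximates what the ports framework does with BINARY_ALIAS
    mkdir -p ${WRKDIR}/.bin
    ln -sf /usr/bin/false ${WRKDIR}/.bin/git     # git=false: git lookups fail fast rather than pulling in git
    ln -sf ${PYTHON_CMD} ${WRKDIR}/.bin/python   # python=${PYTHON_CMD}: needed by the test suite
    # build and test phases then run with PATH=${WRKDIR}/.bin:${PATH}
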
diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo
index 02325335e84f..5b684aca34ec 100644
--- a/misc/llama-cpp/distinfo
+++ b/misc/llama-cpp/distinfo
@@ -1,5 +1,5 @@
-TIMESTAMP = 1738123263
-SHA256 (ggerganov-llama.cpp-b4576_GH0.tar.gz) = d0c3394ad2c2a89ee79439590b6fb70f443b01b55de4fbe6b90334950429fb54
-SIZE (ggerganov-llama.cpp-b4576_GH0.tar.gz) = 20506059
+TIMESTAMP = 1738829439
+SHA256 (ggerganov-llama.cpp-b4649_GH0.tar.gz) = af7b3235853d6cf12673fdcf424b05a5ec16dc1f45029e8ff4b2d6ee5e901f08
+SIZE (ggerganov-llama.cpp-b4649_GH0.tar.gz) = 20591599
 SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc
 SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496
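
Aside: after a DISTVERSION bump the TIMESTAMP/SHA256/SIZE triple in distinfo is normally regenerated rather than edited by hand, roughly:

    # refetch the new distfile and rewrite distinfo (port path shown as an example)
    cd /usr/ports/misc/llama-cpp
    make makesum
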
diff --git a/misc/llama-cpp/pkg-plist b/misc/llama-cpp/pkg-plist
index ef9c3ec8ebe3..0dc4473711b3 100644
--- a/misc/llama-cpp/pkg-plist
+++ b/misc/llama-cpp/pkg-plist
@@ -17,6 +17,7 @@ bin/convert_hf_to_gguf.py
 %%EXAMPLES%%bin/llama-imatrix
 %%EXAMPLES%%bin/llama-infill
 %%EXAMPLES%%bin/llama-llava-cli
+%%EXAMPLES%%bin/llama-llava-clip-quantize-cli
 %%EXAMPLES%%bin/llama-lookahead
 %%EXAMPLES%%bin/llama-lookup
 %%EXAMPLES%%bin/llama-lookup-create
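
Aside: %%EXAMPLES%% is a pkg-plist substitution tied to the port's EXAMPLES option, so the new llama-llava-clip-quantize-cli entry is only packaged when that option is enabled. After a plist change the packing list is typically re-verified, for instance:

    # stage the port and compare the staging area against pkg-plist
    cd /usr/ports/misc/llama-cpp
    make stage
    make check-plist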