author     Yuri Victorovich <yuri@FreeBSD.org>  2024-02-15 11:24:32 +0000
committer  Yuri Victorovich <yuri@FreeBSD.org>  2024-02-15 11:25:01 +0000
commit     d549d297aaf7d12246f16893f0521079da99de9e (patch)
tree       1310df106ad25d6fd85b88c4b8dfc639cbc4934d
parent     0a4e96c7052305969e2955892d490860e244afd6 (diff)
misc/llama-cpp: New port: Facebook's LLaMA model in C/C++
-rw-r--r--  misc/Makefile            |  1
-rw-r--r--  misc/llama-cpp/Makefile  | 36
-rw-r--r--  misc/llama-cpp/distinfo  |  5
-rw-r--r--  misc/llama-cpp/pkg-descr |  3
-rw-r--r--  misc/llama-cpp/pkg-plist | 38
5 files changed, 83 insertions, 0 deletions
diff --git a/misc/Makefile b/misc/Makefile
index c9a42d646543..345fe32c1080 100644
--- a/misc/Makefile
+++ b/misc/Makefile
@@ -245,6 +245,7 @@
SUBDIR += lifelines
SUBDIR += lightgbm
SUBDIR += lingoteach
+ SUBDIR += llama-cpp
SUBDIR += locale-en_DK
SUBDIR += localedata
SUBDIR += logsurfer
diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
new file mode 100644
index 000000000000..3cabec77cc9a
--- /dev/null
+++ b/misc/llama-cpp/Makefile
@@ -0,0 +1,36 @@
+PORTNAME= llama-cpp
+DISTVERSIONPREFIX= b
+DISTVERSION= 2144
+CATEGORIES= misc # machine-learning
+
+MAINTAINER= yuri@FreeBSD.org
+COMMENT= Facebook's LLaMA model in C/C++
+WWW= https://github.com/ggerganov/llama.cpp
+
+LICENSE= MIT
+LICENSE_FILE= ${WRKSRC}/LICENSE
+
+USES= cmake:testing python:run shebangfix
+USE_LDCONFIG= yes
+
+USE_GITHUB= yes
+GH_ACCOUNT= ggerganov
+GH_PROJECT= llama.cpp
+GH_TUPLE= nomic-ai:kompute:4565194:kompute/kompute
+
+SHEBANG_GLOB= *.py
+
+CMAKE_ON= BUILD_SHARED_LIBS
+CMAKE_OFF= LLAMA_BUILD_TESTS
+CMAKE_TESTING_ON= LLAMA_BUILD_TESTS
+
+LDFLAGS+= -pthread
+
+OPTIONS_DEFINE= EXAMPLES
+OPTIONS_SUB= yes
+
+EXAMPLES_CMAKE_BOOL= LLAMA_BUILD_EXAMPLES
+
+BINARY_ALIAS= git=false
+
+.include <bsd.port.mk>
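[Note: The CMAKE_ON/CMAKE_OFF knobs above become -DBUILD_SHARED_LIBS=ON and
-DLLAMA_BUILD_TESTS=OFF on the cmake command line, so the port installs
libllama.so plus the public headers listed in pkg-plist. As a quick smoke
test of the installed library, a minimal C program along these lines should
build against it -- a sketch, not part of the port; hello-llama.c is a
hypothetical file name, and the default /usr/local prefix is assumed:

    /* hello-llama.c -- check that llama.h and libllama.so are usable.
     * Compile roughly as:
     *   cc hello-llama.c -I/usr/local/include -L/usr/local/lib -lllama \
     *      -o hello-llama
     */
    #include <stdio.h>
    #include <llama.h>

    int main(void) {
        /* Report the CPU/backend features libllama was built with. */
        printf("%s\n", llama_print_system_info());
        return 0;
    }
]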
diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo
new file mode 100644
index 000000000000..b11d5d4b173e
--- /dev/null
+++ b/misc/llama-cpp/distinfo
@@ -0,0 +1,5 @@
+TIMESTAMP = 1707995361
+SHA256 (ggerganov-llama.cpp-b2144_GH0.tar.gz) = 679d2deb41b9df3d04bc5fb3b8fd255717009a08927962eca6476f26bff74731
+SIZE (ggerganov-llama.cpp-b2144_GH0.tar.gz) = 8562099
+SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc
+SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496
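[Note: The TIMESTAMP/SHA256/SIZE lines above are in the format written by the
standard ports `make makesum` target, which fetches all distfiles -- including
the nomic-ai/kompute tarball pulled in via GH_TUPLE -- and regenerates
distinfo. A typical maintainer workflow after bumping DISTVERSION:

    cd /usr/ports/misc/llama-cpp
    make makesum    # fetch distfiles and rewrite distinfo with fresh checksums
]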
diff --git a/misc/llama-cpp/pkg-descr b/misc/llama-cpp/pkg-descr
new file mode 100644
index 000000000000..176d4fa350e8
--- /dev/null
+++ b/misc/llama-cpp/pkg-descr
@@ -0,0 +1,3 @@
+The main goal of llama.cpp is to enable LLM inference with minimal setup and
+state-of-the-art performance on a wide variety of hardware - locally and in
+the cloud.
diff --git a/misc/llama-cpp/pkg-plist b/misc/llama-cpp/pkg-plist
new file mode 100644
index 000000000000..ced1b9e1d19c
--- /dev/null
+++ b/misc/llama-cpp/pkg-plist
@@ -0,0 +1,38 @@
+%%EXAMPLES%%bin/baby-llama
+%%EXAMPLES%%bin/batched
+%%EXAMPLES%%bin/batched-bench
+%%EXAMPLES%%bin/beam-search
+%%EXAMPLES%%bin/benchmark
+%%EXAMPLES%%bin/convert-llama2c-to-ggml
+%%EXAMPLES%%bin/convert-lora-to-ggml.py
+%%EXAMPLES%%bin/convert.py
+%%EXAMPLES%%bin/embedding
+%%EXAMPLES%%bin/export-lora
+%%EXAMPLES%%bin/finetune
+%%EXAMPLES%%bin/imatrix
+%%EXAMPLES%%bin/infill
+%%EXAMPLES%%bin/llama-bench
+%%EXAMPLES%%bin/llava-cli
+%%EXAMPLES%%bin/lookahead
+%%EXAMPLES%%bin/lookup
+%%EXAMPLES%%bin/main
+%%EXAMPLES%%bin/parallel
+%%EXAMPLES%%bin/passkey
+%%EXAMPLES%%bin/perplexity
+%%EXAMPLES%%bin/quantize
+%%EXAMPLES%%bin/quantize-stats
+%%EXAMPLES%%bin/save-load-state
+%%EXAMPLES%%bin/server
+%%EXAMPLES%%bin/simple
+%%EXAMPLES%%bin/speculative
+%%EXAMPLES%%bin/tokenize
+%%EXAMPLES%%bin/train-text-from-scratch
+include/ggml-alloc.h
+include/ggml-backend.h
+include/ggml.h
+include/llama.h
+lib/cmake/Llama/LlamaConfig.cmake
+lib/cmake/Llama/LlamaConfigVersion.cmake
+lib/libggml_shared.so
+lib/libllama.so
+%%EXAMPLES%%lib/libllava_shared.so
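[Note: Because the Makefile sets OPTIONS_SUB=yes, each %%EXAMPLES%% prefix in
pkg-plist is substituted when the package list is generated: it expands to an
empty string when the EXAMPLES option is enabled, and to "@comment " when it
is disabled, so those files only appear in the package manifest for EXAMPLES
builds. For instance, with EXAMPLES off the first entry becomes:

    @comment bin/baby-llama
]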