# FreeBSD port of llama.cpp: C/C++ inference for the LLaMA model family,
# fetched from GitHub and built with CMake as a shared library.
#
# -- Distribution: upstream tags releases as "b<NNNN>", hence the "b" prefix --
PORTNAME= llama-cpp
DISTVERSIONPREFIX= b
DISTVERSION= 2843
# text after '#' is a comment; "machine-learning" kept for searchability only
CATEGORIES= misc # machine-learning
MAINTAINER= yuri@FreeBSD.org
# trailing "# '" balances the apostrophe for editor syntax highlighting
COMMENT= Facebook's LLaMA model in C/C++ # '
WWW= https://github.com/ggerganov/llama.cpp
# -- License: upstream ships an MIT LICENSE file in the source tree --
LICENSE= MIT
LICENSE_FILE= ${WRKSRC}/LICENSE
# -- Platform restrictions --
BROKEN_armv7= clang crashes, see https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=278810
# -- Build frameworks: CMake (with test-target support), a C++11-capable
# compiler, Python needed at run time only; shebangfix rewrites interpreter
# paths in the files matched by SHEBANG_GLOB below --
USES= cmake:testing compiler:c++11-lang python:run shebangfix
# register installed shared libraries with ldconfig(8)
USE_LDCONFIG= yes
# -- Source fetch: primary GitHub repo plus the kompute dependency pinned by
# short commit hash and placed under kompute/kompute in the work tree --
USE_GITHUB= yes
GH_ACCOUNT= ggerganov
GH_PROJECT= llama.cpp
GH_TUPLE= nomic-ai:kompute:4565194:kompute/kompute
SHEBANG_GLOB= *.py
# -- CMake knobs: always build shared libs; tests are off for normal builds
# and switched on only when the testing target is requested --
CMAKE_ON= BUILD_SHARED_LIBS
CMAKE_OFF= LLAMA_BUILD_TESTS
CMAKE_TESTING_ON= LLAMA_BUILD_TESTS
LDFLAGS+= -pthread
# -- Options: EXAMPLES toggles upstream's example programs via the CMake bool --
OPTIONS_DEFINE= EXAMPLES
OPTIONS_SUB= yes
EXAMPLES_CMAKE_BOOL= LLAMA_BUILD_EXAMPLES
# NOTE(review): git is aliased to false(1) during the build — presumably to
# stop CMake from querying git metadata; confirm against upstream CMakeLists
BINARY_ALIAS= git=false
# 1 test fails due to a missing model file (stories260K.gguf)
.include <bsd.port.mk>