Diffstat (limited to 'misc/ollama')
-rw-r--r--  misc/ollama/Makefile                                                          | 103
-rw-r--r--  misc/ollama/distinfo                                                          |  14
-rw-r--r--  misc/ollama/files/patch-FreeBSD-compatibility                                 | 277
-rw-r--r--  misc/ollama/files/patch-llm_generate_gen__common.sh                           |  35
-rw-r--r--  misc/ollama/files/patch-llm_llama.cpp_ggml_src_vulkan-shaders_CMakeLists.txt  |  10
-rw-r--r--  misc/ollama/files/patch-ml_backend_ggml_ggml_src_ggml-backend-reg.cpp         |  18
-rw-r--r--  misc/ollama/files/patch-ml_path.go                                            |  11
-rw-r--r--  misc/ollama/pkg-descr                                                         |  15
-rw-r--r--  misc/ollama/pkg-message                                                       |   4
9 files changed, 118 insertions, 369 deletions
diff --git a/misc/ollama/Makefile b/misc/ollama/Makefile
index 4b6abb3409da..961333179390 100644
--- a/misc/ollama/Makefile
+++ b/misc/ollama/Makefile
@@ -1,7 +1,7 @@
PORTNAME= ollama
DISTVERSIONPREFIX= v
-DISTVERSION= 0.3.6
-PORTREVISION= 5
+DISTVERSION= 0.13.1-rc0
+PORTREVISION= 1
CATEGORIES= misc # machine-learning
MAINTAINER= yuri@FreeBSD.org
@@ -12,49 +12,82 @@ WWW= https://ollama.com \
LICENSE= MIT
LICENSE_FILE= ${WRKSRC}/LICENSE
-ONLY_FOR_ARCHS= amd64
-ONLY_FOR_ARCHS_REASON= bundled patched llama-cpp is placed into the arch-specific path
-
BUILD_DEPENDS= bash:shells/bash \
- cmake:devel/cmake-core \
- glslc:graphics/shaderc \
- vulkan-headers>0:graphics/vulkan-headers
-LIB_DEPENDS= libvulkan.so:graphics/vulkan-loader
-
-USES= go:1.22,modules pkgconfig
+ ${LOCALBASE}/include/miniaudio/miniaudio.h:audio/miniaudio \
+ ${LOCALBASE}/include/nlohmann/json_fwd.hpp:devel/nlohmann-json \
+ ${LOCALBASE}/include/stb/stb_image.h:devel/stb
-CONFLICTS_BUILD= llama-cpp
+USES= cmake:indirect go:1.24,modules localbase pkgconfig
-GO_MODULE= github.com/${PORTNAME}/${PORTNAME}
+GO_MODULE= github.com/yurivict/${PORTNAME} # fork with FreeBSD patches
GO_TARGET= .
-
-USE_GITHUB= nodefault
-GH_TUPLE= ggerganov:llama.cpp:1e6f6554aa11fa10160a5fda689e736c3c34169f:llama_cpp/llm/llama.cpp \
- blabber:go-freebsd-sysctl:503969f:go_sysctl/vendor.x/github.com/blabber/go-freebsd-sysctl
-
-MAKE_ENV= PATH=${PATH}:${WRKSRC}/llm/build/bsd/x86_64_static/bin # workaround to find vulkan-shaders-gen
+GO_ENV+= CGO_CXXFLAGS="${CXXFLAGS}"
PLIST_FILES= bin/${PORTNAME} \
bin/ollama-limit-gpu-layers
-post-patch: # workaround for https://github.com/ollama/ollama/issues/6259 (use of external libllama.so)
- @${REINPLACE_CMD} \
- -e '\
- s| llama | llama ${LOCALBASE}/lib/libvulkan.so omp pthread |; \
- s| llama | ${WRKSRC}/llm/build/bsd/x86_64_static/src/libllama.a |; \
- s| ggml | ${WRKSRC}/llm/build/bsd/x86_64_static/ggml/src/libggml.a |; \
- ' \
- ${WRKSRC}/llm/ext_server/CMakeLists.txt
- # move vendor.x to vendor
- @(cd ${WRKSRC}/vendor.x && ${TAR} cf - .) | (cd ${WRKSRC}/vendor && ${TAR} xf -)
-
-pre-build:
- @${CP} ${WRKSRC}/app/store/store_linux.go ${WRKSRC}/app/store/store_bsd.go
- @cd ${GO_WRKSRC} && \
- ${SETENVI} ${WRK_ENV} ${MAKE_ENV} ${GO_ENV} GOMAXPROCS=${MAKE_JOBS_NUMBER} GOPROXY=off ${GO_CMD} generate ${GO_BUILDFLAGS} \
- ./...
+OPTIONS_GROUP= BACKENDS
+OPTIONS_GROUP_BACKENDS= CPU VULKAN
+OPTIONS_DEFAULT= CPU VULKAN
+
+CPU_DESC= Build CPU backend shared libraries
+CPU_PLIST_FILES= lib/ollama/libggml-base.so \
+ lib/ollama/libggml-cpu-alderlake.so \
+ lib/ollama/libggml-cpu-haswell.so \
+ lib/ollama/libggml-cpu-icelake.so \
+ lib/ollama/libggml-cpu-sandybridge.so \
+ lib/ollama/libggml-cpu-skylakex.so \
+ lib/ollama/libggml-cpu-sse42.so \
+ lib/ollama/libggml-cpu-x64.so
+
+VULKAN_DESC= Build Vulkan GPU backend shared library
+VULKAN_BUILD_DEPENDS= glslc:graphics/shaderc \
+ ${LOCALBASE}/include/vulkan/vulkan.h:graphics/vulkan-headers
+VULKAN_LIB_DEPENDS= libvulkan.so:graphics/vulkan-loader
+VULKAN_PLIST_FILES= lib/ollama/libggml-vulkan.so
+
+.include <bsd.port.options.mk>
+
+_CMAKE_FLAGS= -DCMAKE_BUILD_TYPE=Release -DGGML_BACKEND_DL=ON -DGGML_BACKEND_DIR=${PREFIX}/lib/ollama
+
+post-patch: # change import path to the fork
+ @cd ${WRKSRC} && \
+ (${GREP} -rl ollama/ollama | ${XARGS} ${REINPLACE_CMD} -i '' -e 's|ollama/ollama|yurivict/ollama|g')
+
+pre-build-CPU-on:
+ @${MKDIR} ${WRKSRC}/build && \
+ cd ${WRKSRC}/build && \
+ ${CMAKE_BIN} ${_CMAKE_FLAGS} .. && \
+ ${MAKE_CMD} ggml-base && \
+ ${MAKE_CMD} ggml-cpu
+
+pre-build-VULKAN-on:
+.if !${PORT_OPTIONS:MCPU}
+ @${MKDIR} ${WRKSRC}/build && \
+ cd ${WRKSRC}/build && \
+ ${CMAKE_BIN} ${_CMAKE_FLAGS} ..
+.endif
+ @cd ${WRKSRC}/build && \
+ ${MAKE_CMD} ggml-vulkan
post-install: # pending https://github.com/ollama/ollama/issues/6407
${INSTALL_SCRIPT} ${FILESDIR}/ollama-limit-gpu-layers ${STAGEDIR}${PREFIX}/bin
+post-install-CPU-on:
+ @${MKDIR} ${STAGEDIR}${PREFIX}/lib/ollama
+ ${INSTALL_LIB} ${WRKSRC}/build/lib/ollama/libggml-base.so \
+ ${STAGEDIR}${PREFIX}/lib/ollama/
+ @for f in ${WRKSRC}/build/lib/ollama/libggml-cpu*.so; do \
+ ${INSTALL_LIB} $$f ${STAGEDIR}${PREFIX}/lib/ollama/; \
+ done
+
+post-install-VULKAN-on:
+ @${MKDIR} ${STAGEDIR}${PREFIX}/lib/ollama
+ ${INSTALL_LIB} ${WRKSRC}/build/lib/ollama/libggml-vulkan.so \
+ ${STAGEDIR}${PREFIX}/lib/ollama/
+
+do-test:
+ @cd ${WRKSRC} && \
+ ${SETENVI} ${WRK_ENV} ${MAKE_ENV} ${GO_ENV} ${GO_CMD} test ./...
+
.include <bsd.port.mk>
diff --git a/misc/ollama/distinfo b/misc/ollama/distinfo
index 86cae5d113d3..a22b0ff08646 100644
--- a/misc/ollama/distinfo
+++ b/misc/ollama/distinfo
@@ -1,9 +1,5 @@
-TIMESTAMP = 1724010094
-SHA256 (go/misc_ollama/ollama-v0.3.6/v0.3.6.mod) = 16c078d8f0b29f84598fb04e3979acf86da41eb41bf4ff8363548e490f38b54e
-SIZE (go/misc_ollama/ollama-v0.3.6/v0.3.6.mod) = 2992
-SHA256 (go/misc_ollama/ollama-v0.3.6/v0.3.6.zip) = 94d2376c9555dd03a76cb093f3213e8155811874db7eab6aff2941d0e75dce07
-SIZE (go/misc_ollama/ollama-v0.3.6/v0.3.6.zip) = 1842735
-SHA256 (go/misc_ollama/ollama-v0.3.6/ggerganov-llama.cpp-1e6f6554aa11fa10160a5fda689e736c3c34169f_GH0.tar.gz) = b2b3137f734fc9a202fac710986f1de837e0ae69a0b532d4cbadb90748f4cb73
-SIZE (go/misc_ollama/ollama-v0.3.6/ggerganov-llama.cpp-1e6f6554aa11fa10160a5fda689e736c3c34169f_GH0.tar.gz) = 19016896
-SHA256 (go/misc_ollama/ollama-v0.3.6/blabber-go-freebsd-sysctl-503969f_GH0.tar.gz) = 1f497780d88f6f056b8d8f59b6aa129566c8041d16d1cda89f212accf88f3165
-SIZE (go/misc_ollama/ollama-v0.3.6/blabber-go-freebsd-sysctl-503969f_GH0.tar.gz) = 2117
+TIMESTAMP = 1764279505
+SHA256 (go/misc_ollama/ollama-v0.13.1-rc0/v0.13.1-rc0.mod) = 24e9aaaef0e2169fef54d14b95b528fce46e0f6788ffb71a93bcd3b035f99654
+SIZE (go/misc_ollama/ollama-v0.13.1-rc0/v0.13.1-rc0.mod) = 3454
+SHA256 (go/misc_ollama/ollama-v0.13.1-rc0/v0.13.1-rc0.zip) = 07d7cb01a4556fb134ab8c4986239afcf87363c12a074e60c9893dda977a5ea3
+SIZE (go/misc_ollama/ollama-v0.13.1-rc0/v0.13.1-rc0.zip) = 20653199
diff --git a/misc/ollama/files/patch-FreeBSD-compatibility b/misc/ollama/files/patch-FreeBSD-compatibility
deleted file mode 100644
index 046ca9b3417c..000000000000
--- a/misc/ollama/files/patch-FreeBSD-compatibility
+++ /dev/null
@@ -1,277 +0,0 @@
--- patch based on https://github.com/ollama/ollama/issues/1102#issuecomment-2270042340
-
-new file mode 100644
---- gpu/gpu_bsd.go.orig 2024-08-18 20:03:12 UTC
-+++ gpu/gpu_bsd.go
-@@ -0,0 +1,122 @@
-+//go:build dragonfly || freebsd || netbsd || openbsd
-+
-+package gpu
-+
-+import "github.com/ollama/ollama/format"
-+//import sysctl "github.com/lorenzosaino/go-sysctl" // sysctl: this is Linux-only, see https://github.com/lorenzosaino/go-sysctl/issues/7
-+import sysctl "github.com/blabber/go-freebsd-sysctl/sysctl" // sysctl: this is FreeBSD-only basic library
-+import (
-+ "log/slog"
-+)
-+
-+/*
-+#cgo CFLAGS: -I/usr/local/include
-+#cgo LDFLAGS: -L/usr/local/lib -lvulkan
-+
-+#include <stdbool.h>
-+#include <unistd.h>
-+#include <vulkan/vulkan.h>
-+
-+bool hasVulkanSupport(uint64_t *memSize) {
-+ VkInstance instance;
-+
-+ VkApplicationInfo appInfo = { VK_STRUCTURE_TYPE_APPLICATION_INFO };
-+ appInfo.pApplicationName = "Ollama";
-+ appInfo.apiVersion = VK_API_VERSION_1_0;
-+
-+ VkInstanceCreateInfo createInfo = { VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO };
-+ createInfo.pApplicationInfo = &appInfo;
-+
-+ // Create a Vulkan instance
-+ if (vkCreateInstance(&createInfo, NULL, &instance) != VK_SUCCESS)
-+ return false;
-+
-+ // Fetch the first physical Vulkan device. Note that numDevices is overwritten with the number of devices found
-+ uint32_t numDevices = 1;
-+ VkPhysicalDevice device;
-+ vkEnumeratePhysicalDevices(instance, &numDevices, &device);
-+ if (numDevices == 0) {
-+ vkDestroyInstance(instance, NULL);
-+ return false;
-+ }
-+
-+ // Fetch the memory information for this device.
-+ VkPhysicalDeviceMemoryProperties memProperties;
-+ vkGetPhysicalDeviceMemoryProperties(device, &memProperties);
-+
-+ // Add up all the heaps.
-+ VkDeviceSize totalMemory = 0;
-+ for (uint32_t i = 0; i < memProperties.memoryHeapCount; ++i) {
-+ if (memProperties.memoryHeaps[i].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) {
-+ *memSize += memProperties.memoryHeaps[i].size;
-+ }
-+ }
-+
-+ vkDestroyInstance(instance, NULL);
-+ return true;
-+}
-+*/
-+import "C"
-+
-+func GetGPUInfo() GpuInfoList {
-+ var gpuMem C.uint64_t
-+ if C.hasVulkanSupport(&gpuMem) {
-+ // Vulkan supported
-+ return []GpuInfo{
-+ {
-+ Library: "vulkan",
-+ ID: "0",
-+ MinimumMemory: 512 * format.MebiByte,
-+ memInfo: memInfo{
-+ FreeMemory: uint64(gpuMem),
-+ TotalMemory: uint64(gpuMem),
-+ },
-+ },
-+ }
-+ }
-+
-+ // CPU fallback
-+ cpuMem, _ := GetCPUMem()
-+ return []GpuInfo{
-+ {
-+ Library: "cpu",
-+ memInfo: cpuMem,
-+ },
-+ }
-+}
-+
-+func GetCPUInfo() GpuInfoList {
-+ mem, _ := GetCPUMem()
-+ return []GpuInfo{
-+ {
-+ Library: "cpu",
-+ Variant: GetCPUCapability(),
-+ memInfo: mem,
-+ },
-+ }
-+}
-+
-+func GetCPUMem() (memInfo, error) {
-+ // all involved sysctl variables
-+ sysctl_vm_page_size, _ := sysctl.GetInt64("vm.stats.vm.v_page_size") // memory page size
-+ sysctl_hw_physmem, _ := sysctl.GetInt64("hw.physmem") // physical memory in bytes
-+ sysctl_vm_free_count, _ := sysctl.GetInt64("vm.stats.vm.v_free_count") // free page count
-+ sysctl_vm_swap_total, _ := sysctl.GetInt64("vm.swap_total") // total swap size in bytes
-+
-+ // individual values
-+ total_memory := uint64(sysctl_hw_physmem)
-+ free_memory := uint64(sysctl_vm_free_count) * uint64(sysctl_vm_page_size)
-+ free_swap := uint64(sysctl_vm_swap_total) // wrong to use the total swap size here, should be vm.swap_free, see https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=280909
-+
-+ slog.Debug("gpu_bsd.go::GetCPUMem::GetCPUMem", "total_memory", total_memory, "free_memory", free_memory, "free_swap", free_swap)
-+
-+ return memInfo{
-+ TotalMemory: uint64(total_memory),
-+ FreeMemory: uint64(free_memory),
-+ FreeSwap: uint64(free_swap),
-+ }, nil
-+}
-+
-+func (l GpuInfoList) GetVisibleDevicesEnv() (string, string) {
-+ return "", ""
-+}
---- gpu/gpu_test.go.orig 1979-11-30 08:00:00 UTC
-+++ gpu/gpu_test.go
-@@ -11,7 +11,7 @@ func TestBasicGetGPUInfo(t *testing.T) {
- func TestBasicGetGPUInfo(t *testing.T) {
- info := GetGPUInfo()
- assert.NotEmpty(t, len(info))
-- assert.Contains(t, "cuda rocm cpu metal", info[0].Library)
-+ assert.Contains(t, "cuda rocm cpu metal vulkan", info[0].Library)
- if info[0].Library != "cpu" {
- assert.Greater(t, info[0].TotalMemory, uint64(0))
- assert.Greater(t, info[0].FreeMemory, uint64(0))
-@@ -24,6 +24,8 @@ func TestCPUMemInfo(t *testing.T) {
- switch runtime.GOOS {
- case "darwin":
- t.Skip("CPU memory not populated on darwin")
-+ case "dragonfly", "freebsd", "netbsd", "openbsd":
-+ t.Skip("CPU memory is not populated on *BSD")
- case "linux", "windows":
- assert.Greater(t, info.TotalMemory, uint64(0))
- assert.Greater(t, info.FreeMemory, uint64(0))
---- llm/generate/gen_bsd.sh.orig 2024-08-06 16:29:05 UTC
-+++ llm/generate/gen_bsd.sh
-@@ -0,0 +1,54 @@
-+#!/bin/sh
-+# This script is intended to run inside the go generate
-+# working directory must be ./llm/generate/
-+
-+set -ex
-+set -o pipefail
-+echo "Starting BSD generate script"
-+. $(dirname $0)/gen_common.sh
-+init_vars
-+#git_module_setup
-+apply_patches
-+
-+COMMON_BSD_DEFS="-DCMAKE_SYSTEM_NAME=$(uname -s)"
-+CMAKE_TARGETS="--target llama --target ggml"
-+
-+COMMON_CPU_DEFS="${COMMON_BSD_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DBUILD_SHARED_LIBS=off"
-+
-+# Static build for linking into the Go binary
-+init_vars
-+CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_VULKAN=on -DGGML_ACCELERATE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
-+BUILD_DIR="../build/bsd/${ARCH}_static"
-+echo "Building static library"
-+build
-+
-+init_vars
-+CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
-+BUILD_DIR="../build/bsd/${ARCH}/cpu"
-+echo "Building LCD CPU"
-+build
-+compress
-+
-+init_vars
-+CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
-+BUILD_DIR="../build/bsd/${ARCH}/cpu_avx"
-+echo "Building AVX CPU"
-+build
-+compress
-+
-+init_vars
-+CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}"
-+BUILD_DIR="../build/bsd/${ARCH}/cpu_avx2"
-+echo "Building AVX2 CPU"
-+build
-+compress
-+
-+init_vars
-+CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_VULKAN=on ${CMAKE_DEFS}"
-+BUILD_DIR="../build/bsd/${ARCH}/vulkan"
-+echo "Building Vulkan GPU"
-+build
-+compress
-+
-+cleanup
-+echo "go generate completed. LLM runners: $(cd ${BUILD_DIR}/..; echo *)"
---- llm/generate/generate_bsd.go.orig 2024-08-06 07:41:26 UTC
-+++ llm/generate/generate_bsd.go
-@@ -0,0 +1,5 @@
-+//go:build dragonfly || freebsd || netbsd || openbsd
-+
-+package generate
-+
-+//go:generate bash ./gen_bsd.sh
---- llm/llm.go.orig 1979-11-30 08:00:00 UTC
-+++ llm/llm.go
-@@ -8,6 +8,10 @@ package llm
- // #cgo windows,arm64 LDFLAGS: -static-libstdc++ -static-libgcc -static -L${SRCDIR}/build/windows/arm64_static -L${SRCDIR}/build/windows/arm64_static/src -L${SRCDIR}/build/windows/arm64_static/ggml/src
- // #cgo linux,amd64 LDFLAGS: -L${SRCDIR}/build/linux/x86_64_static -L${SRCDIR}/build/linux/x86_64_static/src -L${SRCDIR}/build/linux/x86_64_static/ggml/src
- // #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/build/linux/arm64_static -L${SRCDIR}/build/linux/arm64_static/src -L${SRCDIR}/build/linux/arm64_static/ggml/src
-+// #cgo dragonfly,amd64 LDFLAGS: ${SRCDIR}/build/bsd/x86_64_static/src/libllama.a -lstdc++ -lm
-+// #cgo freebsd,amd64 LDFLAGS: -L${SRCDIR}/build/bsd/x86_64_static/src -lllama -L${SRCDIR}/build/bsd/x86_64_static/ggml/src -lggml -lstdc++ -lm -lomp
-+// #cgo netbsd,amd64 LDFLAGS: ${SRCDIR}/build/bsd/x86_64_static/src/libllama.a -lstdc++ -lm
-+// #cgo openbsd,amd64 LDFLAGS: ${SRCDIR}/build/bsd/x86_64_static/src/libllama.a -lstdc++ -lm
- // #include <stdlib.h>
- // #include "llama.h"
- import "C"
---- llm/llm_bsd.go.orig 2024-08-06 07:41:26 UTC
-+++ llm/llm_bsd.go
-@@ -0,0 +1,13 @@
-+//go:build dragonfly || freebsd || netbsd || openbsd
-+
-+package llm
-+
-+import (
-+ "embed"
-+ "syscall"
-+)
-+
-+//go:embed build/bsd/*/*/bin/*
-+var libEmbed embed.FS
-+
-+var LlamaServerSysProcAttr = &syscall.SysProcAttr{}
---- scripts/build_bsd.sh.orig 2024-08-06 07:41:26 UTC
-+++ scripts/build_bsd.sh
-@@ -0,0 +1,27 @@
-+#!/bin/sh
-+
-+set -e
-+
-+case "$(uname -s)" in
-+ DragonFly)
-+ ;;
-+ FreeBSD)
-+ ;;
-+ NetBSD)
-+ ;;
-+ OpenBSD)
-+ ;;
-+ *)
-+ echo "$(uname -s) is not supported"
-+ exit 1
-+ ;;
-+esac
-+
-+export VERSION=${VERSION:-$(git describe --tags --first-parent --abbrev=7 --long --dirty --always | sed -e "s/^v//g")}
-+export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'"
-+
-+mkdir -p dist
-+rm -rf llm/llama.cpp/build
-+
-+go generate ./...
-+CGO_ENABLED=1 go build -trimpath -o dist/ollama-bsd
---- scripts/build_freebsd.sh.orig 2024-08-06 07:41:26 UTC
-+++ scripts/build_freebsd.sh
-@@ -0,0 +1 @@
-+build_bsd.sh
-\ No newline at end of file
diff --git a/misc/ollama/files/patch-llm_generate_gen__common.sh b/misc/ollama/files/patch-llm_generate_gen__common.sh
deleted file mode 100644
index 6033d77dc663..000000000000
--- a/misc/ollama/files/patch-llm_generate_gen__common.sh
+++ /dev/null
@@ -1,35 +0,0 @@
---- llm/generate/gen_common.sh.orig 1979-11-30 08:00:00 UTC
-+++ llm/generate/gen_common.sh
-@@ -68,13 +68,8 @@ apply_patches() {
- if [ -n "$(ls -A ../patches/*.diff)" ]; then
- # apply temporary patches until fix is upstream
- for patch in ../patches/*.diff; do
-- for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/); do
-- (cd ${LLAMACPP_DIR}; git checkout ${file})
-- done
-+ (cd ${LLAMACPP_DIR} && patch -B '' -p1 < ${patch})
- done
-- for patch in ../patches/*.diff; do
-- (cd ${LLAMACPP_DIR} && git apply ${patch})
-- done
- fi
- }
-
-@@ -106,14 +101,8 @@ compress() {
- }
-
- # Keep the local tree clean after we're done with the build
--cleanup() {
-- (cd ${LLAMACPP_DIR}/ && git checkout CMakeLists.txt)
-
-- if [ -n "$(ls -A ../patches/*.diff)" ]; then
-- for patch in ../patches/*.diff; do
-- for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/); do
-- (cd ${LLAMACPP_DIR}; git checkout ${file})
-- done
-- done
-- fi
-+cleanup() {
-+ echo "cleanup done"
- }
-+
diff --git a/misc/ollama/files/patch-llm_llama.cpp_ggml_src_vulkan-shaders_CMakeLists.txt b/misc/ollama/files/patch-llm_llama.cpp_ggml_src_vulkan-shaders_CMakeLists.txt
deleted file mode 100644
index a9ef6bec163e..000000000000
--- a/misc/ollama/files/patch-llm_llama.cpp_ggml_src_vulkan-shaders_CMakeLists.txt
+++ /dev/null
@@ -1,10 +0,0 @@
---- llm/llama.cpp/ggml/src/vulkan-shaders/CMakeLists.txt.orig 2024-08-06 15:33:39 UTC
-+++ llm/llama.cpp/ggml/src/vulkan-shaders/CMakeLists.txt
-@@ -2,6 +2,7 @@ add_executable(${TARGET} vulkan-shaders-gen.cpp)
-
- set(TARGET vulkan-shaders-gen)
- add_executable(${TARGET} vulkan-shaders-gen.cpp)
-+target_link_libraries(${TARGET} PRIVATE pthread)
- install(TARGETS ${TARGET} RUNTIME)
- target_compile_features(${TARGET} PRIVATE cxx_std_11)
- target_link_libraries(vulkan-shaders-gen PUBLIC Threads::Threads)
diff --git a/misc/ollama/files/patch-ml_backend_ggml_ggml_src_ggml-backend-reg.cpp b/misc/ollama/files/patch-ml_backend_ggml_ggml_src_ggml-backend-reg.cpp
new file mode 100644
index 000000000000..17d3116f0faa
--- /dev/null
+++ b/misc/ollama/files/patch-ml_backend_ggml_ggml_src_ggml-backend-reg.cpp
@@ -0,0 +1,18 @@
+--- ml/backend/ggml/ggml/src/ggml-backend-reg.cpp.orig 2025-11-29 00:42:10.354754000 -0800
++++ ml/backend/ggml/ggml/src/ggml-backend-reg.cpp 2025-11-29 00:42:20.531699000 -0800
+@@ -538,11 +538,13 @@
+ std::vector<fs::path> search_paths;
+ if (user_search_path == nullptr) {
+ #ifdef GGML_BACKEND_DIR
++ // On FreeBSD, GGML_BACKEND_DIR is set to the correct library directory
+ search_paths.push_back(fs::u8path(GGML_BACKEND_DIR));
+-#endif
+- // default search paths: executable directory, current directory
++#else
++ // Fallback: search executable directory and current directory
+ search_paths.push_back(get_executable_path());
+ search_paths.push_back(fs::current_path());
++#endif
+ } else {
+ search_paths.push_back(fs::u8path(user_search_path));
+ }
diff --git a/misc/ollama/files/patch-ml_path.go b/misc/ollama/files/patch-ml_path.go
new file mode 100644
index 000000000000..c9a0ef4dfb8f
--- /dev/null
+++ b/misc/ollama/files/patch-ml_path.go
@@ -0,0 +1,11 @@
+--- ml/path.go.orig 2025-11-29 01:38:34.298472000 -0800
++++ ml/path.go 2025-11-29 01:39:10.084873000 -0800
+@@ -27,7 +27,7 @@
+ switch runtime.GOOS {
+ case "windows":
+ libPath = filepath.Join(filepath.Dir(exe), "lib", "ollama")
+- case "linux":
++ case "linux", "freebsd":
+ libPath = filepath.Join(filepath.Dir(exe), "..", "lib", "ollama")
+ case "darwin":
+ libPath = filepath.Dir(exe)
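
With the ml/path.go patch above, FreeBSD takes the same branch as Linux, so the backend
libraries are resolved relative to the installed binary: PREFIX/bin/ollama ->
PREFIX/lib/ollama, the same directory the Makefile installs the libggml-*.so files into.
A minimal standalone Go sketch of that resolution logic (the helper name and the fallback
branch are illustrative, not taken verbatim from the ollama sources):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "runtime"
    )

    // libOllamaDir mirrors the patched lookup: on freebsd, as on linux, the
    // lib/ollama directory sits next to the bin/ directory holding the binary.
    func libOllamaDir() (string, error) {
        exe, err := os.Executable()
        if err != nil {
            return "", err
        }
        switch runtime.GOOS {
        case "linux", "freebsd":
            // e.g. /usr/local/bin/ollama -> /usr/local/lib/ollama
            return filepath.Join(filepath.Dir(exe), "..", "lib", "ollama"), nil
        case "darwin":
            return filepath.Dir(exe), nil
        default:
            // illustrative fallback, matching the windows branch in ml/path.go
            return filepath.Join(filepath.Dir(exe), "lib", "ollama"), nil
        }
    }

    func main() {
        dir, err := libOllamaDir()
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println(dir)
    }
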
diff --git a/misc/ollama/pkg-descr b/misc/ollama/pkg-descr
index 96a69944f608..ea5609a7df6e 100644
--- a/misc/ollama/pkg-descr
+++ b/misc/ollama/pkg-descr
@@ -1,3 +1,14 @@
-Ollama allows to get up and running with large language models.
+Ollama is a tool that allows you to get up and running with large language
+models locally. It provides a simple command-line interface to run and
+manage models, as well as a REST API for programmatic access.
-Ollama supports a list of models available on ollama.com/library.
+Ollama supports a wide range of models available on ollama.com/library,
+including popular models like Llama 3, Gemma, and Mistral. It also
+allows you to customize models and create your own.
+
+With Ollama, you can:
+- Run large language models on your own machine
+- Chat with models in the terminal
+- Generate text and embeddings
+- Customize models with your own prompts and data
+- Expose models through a REST API for use in your applications
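
As an illustration of the REST API mentioned above, here is a small Go sketch that asks a
locally running ollama server (default address localhost:11434) for a non-streaming
completion; the /api/generate endpoint and its fields follow the upstream API
documentation, and the model name assumes a model such as mistral has already been pulled:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
        "os"
    )

    func main() {
        // "mistral" is assumed to be pulled already (e.g. via: ollama run mistral).
        body, err := json.Marshal(map[string]any{
            "model":  "mistral",
            "prompt": "Why is the sky blue?",
            "stream": false,
        })
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }

        resp, err := http.Post("http://localhost:11434/api/generate",
            "application/json", bytes.NewReader(body))
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        defer resp.Body.Close()

        // With "stream": false the server returns a single JSON object whose
        // "response" field holds the generated text.
        var out struct {
            Response string `json:"response"`
        }
        if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println(out.Response)
    }
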
diff --git a/misc/ollama/pkg-message b/misc/ollama/pkg-message
index 4c41529e55fe..4d6db4b1a43a 100644
--- a/misc/ollama/pkg-message
+++ b/misc/ollama/pkg-message
@@ -7,9 +7,11 @@ To run ollama, plese open 2 terminals.
1. In the first terminal, please run:
$ OLLAMA_NUM_PARALLEL=1 OLLAMA_DEBUG=1 LLAMA_DEBUG=1 ollama start
2. In the second terminal, please run:
+ $ ollama run gemma3
+ or
$ ollama run mistral
-This will download and run the AI model "mistral".
+This will download and run the specified AI model.
You will be able to interact with it in plain English.
Please see https://ollama.com/library for the list