ollama: add module (#5735)

Terje Larsen 2025-01-10 12:31:03 +01:00 committed by GitHub
parent d4aebb947a
commit 2532b500c3
6 changed files with 138 additions and 0 deletions


@@ -346,6 +346,7 @@ let
./services/nextcloud-client.nix
./services/nix-gc.nix
./services/notify-osd.nix
./services/ollama.nix
./services/opensnitch-ui.nix
./services/osmscout-server.nix
./services/owncloud-client.nix


@@ -0,0 +1,97 @@
{ config, lib, pkgs, ... }:

with lib;

let

  cfg = config.services.ollama;

  ollamaPackage = if cfg.acceleration == null then
    cfg.package
  else
    cfg.package.override { inherit (cfg) acceleration; };

in {
  meta.maintainers = [ maintainers.terlar ];

  options = {
    services.ollama = {
      enable = mkEnableOption "ollama server for local large language models";

      package = mkPackageOption pkgs "ollama" { };

      host = mkOption {
        type = types.str;
        default = "127.0.0.1";
        example = "[::]";
        description = ''
          The host address on which the ollama server's HTTP interface listens.
        '';
      };

      port = mkOption {
        type = types.port;
        default = 11434;
        example = 11111;
        description = ''
          The port on which the ollama server listens.
        '';
      };

      acceleration = mkOption {
        type = types.nullOr (types.enum [ false "rocm" "cuda" ]);
        default = null;
        example = "rocm";
        description = ''
          What interface to use for hardware acceleration.

          - `null`: default behavior
            - if `nixpkgs.config.rocmSupport` is enabled, uses `"rocm"`
            - if `nixpkgs.config.cudaSupport` is enabled, uses `"cuda"`
            - otherwise defaults to `false`
          - `false`: disable GPU, only use CPU
          - `"rocm"`: supported by most modern AMD GPUs
            - may require overriding the GPU type with `services.ollama.rocmOverrideGfx`
              if ROCm does not detect your AMD GPU
          - `"cuda"`: supported by most modern NVIDIA GPUs
        '';
      };

      environmentVariables = mkOption {
        type = types.attrsOf types.str;
        default = { };
        example = {
          OLLAMA_LLM_LIBRARY = "cpu";
          HIP_VISIBLE_DEVICES = "0,1";
        };
        description = ''
          Set arbitrary environment variables for the ollama service.

          Be aware that these are only seen by the ollama server (systemd service),
          not by normal invocations like `ollama run`.
          Since `ollama run` is mostly a shell around the ollama server, this is usually sufficient.
        '';
      };
    };
  };

  config = mkIf cfg.enable {
    systemd.user.services.ollama = {
      Unit = {
        Description = "Server for local large language models";
        After = [ "network.target" ];
      };

      Service = {
        ExecStart = "${getExe ollamaPackage} serve";
        Environment =
          (mapAttrsToList (n: v: "${n}=${v}") cfg.environmentVariables)
          ++ [ "OLLAMA_HOST=${cfg.host}:${toString cfg.port}" ];
      };

      Install = { WantedBy = [ "default.target" ]; };
    };

    home.packages = [ ollamaPackage ];
  };
}
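
For orientation, a minimal sketch of how the new module could be consumed from a user's Home Manager configuration. The option names come from the module above; the concrete values (ROCm acceleration, a single visible GPU) are illustrative assumptions, not defaults:

{ ... }:
{
  services.ollama = {
    enable = true;

    # Defaults shown explicitly: the server listens on 127.0.0.1:11434.
    host = "127.0.0.1";
    port = 11434;

    # Assumed example values: build ollama with ROCm support and expose one GPU.
    acceleration = "rocm";
    environmentVariables = { HIP_VISIBLE_DEVICES = "0"; };
  };
}

With this enabled, the user service is wanted by `default.target`, and the same (possibly overridden) package is added to `home.packages`, so `ollama run` is available on the PATH.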


@@ -270,6 +270,7 @@ in import nmtSrc {
./modules/services/mpd-mpris
./modules/services/mpdris2
./modules/services/nix-gc
./modules/services/ollama
./modules/services/osmscout-server
./modules/services/pantalaimon
./modules/services/parcellite


@@ -0,0 +1,13 @@
{
  config = {
    services.ollama.enable = true;

    test.stubs.ollama = { };

    nmt.script = ''
      serviceFile="home-files/.config/systemd/user/ollama.service"
      assertFileRegex "$serviceFile" 'After=network\.target'
      assertFileRegex "$serviceFile" 'Environment=OLLAMA_HOST=127.0.0.1:11434'
    '';
  };
}


@@ -0,0 +1,4 @@
{
  ollama-basic = ./basic.nix;
  ollama-set-environment-variables = ./set-environment-variables.nix;
}


@@ -0,0 +1,22 @@
{
  config = {
    services.ollama = {
      enable = true;
      host = "localhost";
      port = 11111;
      environmentVariables = {
        OLLAMA_LLM_LIBRARY = "cpu";
        HIP_VISIBLE_DEVICES = "0,1";
      };
    };

    test.stubs.ollama = { };

    nmt.script = ''
      serviceFile="home-files/.config/systemd/user/ollama.service"
      assertFileRegex "$serviceFile" 'Environment=OLLAMA_HOST=localhost:11111'
      assertFileRegex "$serviceFile" 'Environment=OLLAMA_LLM_LIBRARY=cpu'
      assertFileRegex "$serviceFile" 'Environment=HIP_VISIBLE_DEVICES=0,1'
    '';
  };
}
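
For context, the unit file this test asserts against would be rendered by Home Manager roughly as follows. This is a sketch, not output from the commit: the ExecStart path is a placeholder (in the test it would point at the stub package), and the exact ordering of the Environment lines is an assumption about how the attribute set is serialized:

[Unit]
Description=Server for local large language models
After=network.target

[Service]
Environment=HIP_VISIBLE_DEVICES=0,1
Environment=OLLAMA_LLM_LIBRARY=cpu
Environment=OLLAMA_HOST=localhost:11111
ExecStart=/nix/store/<hash>-ollama-<version>/bin/ollama serve

[Install]
WantedBy=default.target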