path: root/nixos/modules/services/web-apps/nextjs-ollama-llm-ui.nix
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.services.nextjs-ollama-llm-ui;
  # We have to override the URL of the Ollama service here, because it gets baked into the web app.
  nextjs-ollama-llm-ui = cfg.package.override { inherit (cfg) ollamaUrl; };
in
{
  options = {
    services.nextjs-ollama-llm-ui = {
      enable = lib.mkEnableOption ''
        Simple Ollama web UI service; an easy-to-use web frontend for an Ollama backend service.
        Run state-of-the-art AI large language models (LLMs), similar to ChatGPT, locally and
        with privacy on your personal computer.
        This service is stateless and doesn't store any data on the server; all data is kept
        locally in your web browser.
        See https://github.com/jakobhoeg/nextjs-ollama-llm-ui.

        Required: an Ollama backend service must be running, and
        "services.nextjs-ollama-llm-ui.ollamaUrl" must point to it.
        You can host such a backend service with NixOS through "services.ollama".
      '';
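
      # A minimal usage sketch (not part of this module's logic), assuming a local Ollama
      # backend hosted through "services.ollama" as suggested in the description above;
      # the URL matches this module's default:
      #
      #   services.ollama.enable = true;
      #   services.nextjs-ollama-llm-ui = {
      #     enable = true;
      #     ollamaUrl = "127.0.0.1:11434";
      #   };
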
      package = lib.mkPackageOption pkgs "nextjs-ollama-llm-ui" { };

      hostname = lib.mkOption {
        type = lib.types.str;
        default = "127.0.0.1";
        example = "ui.example.org";
        description = ''
          The hostname under which the Ollama UI interface should be accessible.
          By default it uses localhost/127.0.0.1, so it is only accessible from the local machine.
          Change it to "0.0.0.0" to make it directly accessible from the local network.

          Note: You should keep it at 127.0.0.1 and expose it to the local network or the
          internet only from a (home) server behind a reverse proxy with TLS encryption;
          see the sketch after this option.
          See https://wiki.nixos.org/wiki/Nginx for instructions on how to set up such a
          reverse proxy.
        '';
      };
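
      # A hedged sketch of the recommended setup from the option above: keep the UI on
      # 127.0.0.1 and publish it through an nginx reverse proxy with TLS. The domain
      # "ui.example.org" and the ACME settings are illustrative assumptions, not
      # prescribed values; the proxied port is this module's default.
      #
      #   services.nextjs-ollama-llm-ui.enable = true;
      #   services.nginx = {
      #     enable = true;
      #     virtualHosts."ui.example.org" = {
      #       enableACME = true;
      #       forceSSL = true;
      #       locations."/".proxyPass = "http://127.0.0.1:3000";
      #     };
      #   };
      #   security.acme.acceptTerms = true;
      #   security.acme.defaults.email = "admin@example.org";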

      port = lib.mkOption {
        type = lib.types.port;
        default = 3000;
        example = 3000;
        description = ''
          The port under which the Ollama UI interface should be accessible.
        '';
      };

      ollamaUrl = lib.mkOption {
        type = lib.types.str;
        default = "127.0.0.1:11434";
        example = "https://ollama.example.org";
        description = ''
          The address (including host and port) under which we can access the Ollama backend server.
          Note: if the UI service is running under a domain such as "https://ui.example.org",
          the Ollama backend service must allow CORS requests from that domain, e.g. by adding
          "services.ollama.environment.OLLAMA_ORIGINS = [ ... "https://ui.example.org" ];".
          See the sketch after this option.
        '';
      };
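
      # A hedged sketch of pointing the UI at a remote Ollama backend; the domain is an
      # illustrative assumption. As the description above notes, that backend must allow
      # CORS requests from the UI's origin (Ollama reads its allowed origins from the
      # OLLAMA_ORIGINS environment variable):
      #
      #   services.nextjs-ollama-llm-ui = {
      #     enable = true;
      #     ollamaUrl = "https://ollama.example.org";
      #   };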
    };
  };

  config = lib.mkIf cfg.enable {
    systemd.services = {

      nextjs-ollama-llm-ui = {
        wantedBy = [ "multi-user.target" ];
        description = "Nextjs Ollama LLM Ui.";
        after = [ "network.target" ];
        environment = {
          HOSTNAME = cfg.hostname;
          PORT = toString cfg.port;
          NEXT_PUBLIC_OLLAMA_URL = cfg.ollamaUrl;
        };
        serviceConfig = {
          ExecStart = "${lib.getExe nextjs-ollama-llm-ui}";
          DynamicUser = true;
        };
      };
    };
  };
  meta.maintainers = with lib.maintainers; [ malteneuss ];
}