Diffstat (limited to 'nixos/modules/services')
-rw-r--r--  nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/cdi-generate.nix | 60
-rw-r--r--  nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/default.nix | 40
-rw-r--r--  nixos/modules/services/hardware/nvidia-container-toolkit/cdi-generate.nix | 35
-rw-r--r--  nixos/modules/services/hardware/nvidia-container-toolkit/default.nix | 121
-rw-r--r--  nixos/modules/services/mail/roundcube.nix | 2
-rw-r--r--  nixos/modules/services/misc/greenclip.nix | 5
-rw-r--r--  nixos/modules/services/misc/ollama.nix | 38
-rw-r--r--  nixos/modules/services/networking/deconz.nix | 2
-rw-r--r--  nixos/modules/services/networking/firewall-nftables.nix | 18
-rw-r--r--  nixos/modules/services/networking/inadyn.nix | 250
-rw-r--r--  nixos/modules/services/networking/netbird/coturn.nix | 160
-rw-r--r--  nixos/modules/services/networking/netbird/dashboard.nix | 186
-rw-r--r--  nixos/modules/services/networking/netbird/management.nix | 460
-rw-r--r--  nixos/modules/services/networking/netbird/server.md | 42
-rw-r--r--  nixos/modules/services/networking/netbird/server.nix | 67
-rw-r--r--  nixos/modules/services/networking/netbird/signal.nix | 123
-rw-r--r--  nixos/modules/services/networking/sunshine.nix | 156
-rw-r--r--  nixos/modules/services/networking/wpa_supplicant.nix | 13
-rw-r--r--  nixos/modules/services/search/manticore.nix | 131
-rw-r--r--  nixos/modules/services/security/oauth2_proxy.nix | 10
-rw-r--r--  nixos/modules/services/system/earlyoom.nix | 39
-rw-r--r--  nixos/modules/services/web-apps/coder.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/firefly-iii.nix | 367
-rw-r--r--  nixos/modules/services/web-apps/limesurvey.nix | 16
-rw-r--r--  nixos/modules/services/web-apps/mediawiki.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/pretalx.nix | 8
-rw-r--r--  nixos/modules/services/web-servers/nginx/default.nix | 2
-rw-r--r--  nixos/modules/services/x11/desktop-managers/lxqt.nix | 2
28 files changed, 2223 insertions, 136 deletions
diff --git a/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/cdi-generate.nix b/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/cdi-generate.nix
deleted file mode 100644
index 1aaa2d07b9bde..0000000000000
--- a/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/cdi-generate.nix
+++ /dev/null
@@ -1,60 +0,0 @@
-{
-  addDriverRunpath,
-  glibc,
-  jq,
-  lib,
-  nvidia-container-toolkit,
-  nvidia-driver,
-  runtimeShell,
-  writeScriptBin,
-}:
-let
-  mountOptions = { options = ["ro" "nosuid" "nodev" "bind"]; };
-  mounts = [
-    # FIXME: Making /usr mounts optional
-    { hostPath = lib.getExe' nvidia-driver "nvidia-cuda-mps-control";
-      containerPath = "/usr/bin/nvidia-cuda-mps-control"; }
-    { hostPath = lib.getExe' nvidia-driver "nvidia-cuda-mps-server";
-      containerPath = "/usr/bin/nvidia-cuda-mps-server"; }
-    { hostPath = lib.getExe' nvidia-driver "nvidia-debugdump";
-      containerPath = "/usr/bin/nvidia-debugdump"; }
-    { hostPath = lib.getExe' nvidia-driver "nvidia-powerd";
-      containerPath = "/usr/bin/nvidia-powerd"; }
-    { hostPath = lib.getExe' nvidia-driver "nvidia-smi";
-      containerPath = "/usr/bin/nvidia-smi"; }
-    { hostPath = lib.getExe' nvidia-container-toolkit "nvidia-ctk";
-      containerPath = "/usr/bin/nvidia-ctk"; }
-    { hostPath = "${lib.getLib glibc}/lib";
-      containerPath = "${lib.getLib glibc}/lib"; }
-
-    # FIXME: use closureinfo
-    {
-      hostPath = addDriverRunpath.driverLink;
-      containerPath = addDriverRunpath.driverLink;
-    }
-    { hostPath = "${lib.getLib glibc}/lib";
-      containerPath = "${lib.getLib glibc}/lib"; }
-    { hostPath = "${lib.getLib glibc}/lib64";
-      containerPath = "${lib.getLib glibc}/lib64"; }
-  ];
-  jqAddMountExpression = ".containerEdits.mounts[.containerEdits.mounts | length] |= . +";
-  mountsToJq = lib.concatMap
-    (mount:
-      ["${lib.getExe jq} '${jqAddMountExpression} ${builtins.toJSON (mount // mountOptions)}'"])
-    mounts;
-in
-writeScriptBin "nvidia-cdi-generator"
-''
-#! ${runtimeShell}
-
-function cdiGenerate {
-  ${lib.getExe' nvidia-container-toolkit "nvidia-ctk"} cdi generate \
-    --format json \
-    --ldconfig-path ${lib.getExe' glibc "ldconfig"} \
-    --library-search-path ${lib.getLib nvidia-driver}/lib \
-    --nvidia-ctk-path ${lib.getExe' nvidia-container-toolkit "nvidia-ctk"}
-}
-
-cdiGenerate | \
-  ${lib.concatStringsSep " | " mountsToJq} > $RUNTIME_DIRECTORY/nvidia-container-toolkit.json
-''
diff --git a/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/default.nix b/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/default.nix
deleted file mode 100644
index 5aa3c72ee0a06..0000000000000
--- a/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/default.nix
+++ /dev/null
@@ -1,40 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-{
-
-  options = {
-
-    hardware.nvidia-container-toolkit-cdi-generator.enable = lib.mkOption {
-      default = false;
-      internal = true;
-      visible = false;
-      type = lib.types.bool;
-      description = ''
-        Enable dynamic CDI configuration for NVidia devices by running
-        nvidia-container-toolkit on boot.
-      '';
-    };
-
-  };
-
-  config = {
-
-    systemd.services.nvidia-container-toolkit-cdi-generator = lib.mkIf config.hardware.nvidia-container-toolkit-cdi-generator.enable {
-      description = "Container Device Interface (CDI) for Nvidia generator";
-      wantedBy = [ "multi-user.target" ];
-      after = [ "systemd-udev-settle.service" ];
-      serviceConfig = {
-        RuntimeDirectory = "cdi";
-        RemainAfterExit = true;
-        ExecStart =
-          let
-            script = pkgs.callPackage ./cdi-generate.nix { nvidia-driver = config.hardware.nvidia.package; };
-          in
-          lib.getExe script;
-        Type = "oneshot";
-      };
-    };
-
-  };
-
-}
diff --git a/nixos/modules/services/hardware/nvidia-container-toolkit/cdi-generate.nix b/nixos/modules/services/hardware/nvidia-container-toolkit/cdi-generate.nix
new file mode 100644
index 0000000000000..ca769cc44e5c9
--- /dev/null
+++ b/nixos/modules/services/hardware/nvidia-container-toolkit/cdi-generate.nix
@@ -0,0 +1,35 @@
+{
+  glibc,
+  jq,
+  lib,
+  mounts,
+  nvidia-container-toolkit,
+  nvidia-driver,
+  runtimeShell,
+  writeScriptBin,
+}: let
+  mkMount = {hostPath, containerPath, mountOptions}: {
+    inherit hostPath containerPath;
+    options = mountOptions;
+  };
+  jqAddMountExpression = ".containerEdits.mounts[.containerEdits.mounts | length] |= . +";
+  allJqMounts = lib.concatMap
+    (mount:
+      ["${lib.getExe jq} '${jqAddMountExpression} ${builtins.toJSON (mkMount mount)}'"])
+    mounts;
+in
+writeScriptBin "nvidia-cdi-generator"
+''
+#! ${runtimeShell}
+
+function cdiGenerate {
+  ${lib.getExe' nvidia-container-toolkit "nvidia-ctk"} cdi generate \
+    --format json \
+    --ldconfig-path ${lib.getExe' glibc "ldconfig"} \
+    --library-search-path ${lib.getLib nvidia-driver}/lib \
+    --nvidia-ctk-path ${lib.getExe' nvidia-container-toolkit "nvidia-ctk"}
+}
+
+cdiGenerate | \
+  ${lib.concatStringsSep " | " allJqMounts} > $RUNTIME_DIRECTORY/nvidia-container-toolkit.json
+''
diff --git a/nixos/modules/services/hardware/nvidia-container-toolkit/default.nix b/nixos/modules/services/hardware/nvidia-container-toolkit/default.nix
new file mode 100644
index 0000000000000..7b4973d3c6b0c
--- /dev/null
+++ b/nixos/modules/services/hardware/nvidia-container-toolkit/default.nix
@@ -0,0 +1,121 @@
+{ config, lib, pkgs, ... }:
+
+{
+  imports = [
+    (lib.mkRenamedOptionModule
+      [ "virtualisation" "containers" "cdi" "dynamic" "nvidia" "enable" ]
+      [ "hardware" "nvidia-container-toolkit" "enable" ])
+  ];
+
+  options = let
+    mountType = {
+      options = {
+        hostPath = lib.mkOption {
+          type = lib.types.str;
+          description = "Host path.";
+        };
+        containerPath = lib.mkOption {
+          type = lib.types.str;
+          description = "Container path.";
+        };
+        mountOptions = lib.mkOption {
+          default = [ "ro" "nosuid" "nodev" "bind" ];
+          type = lib.types.listOf lib.types.str;
+          description = "Mount options.";
+        };
+      };
+    };
+  in {
+
+    hardware.nvidia-container-toolkit = {
+      enable = lib.mkOption {
+        default = false;
+        type = lib.types.bool;
+        description = ''
+          Enable dynamic CDI configuration for NVidia devices by running
+          nvidia-container-toolkit on boot.
+        '';
+      };
+
+      mounts = lib.mkOption {
+        type = lib.types.listOf (lib.types.submodule mountType);
+        default = [];
+        description = "Mounts to be added to every container under the Nvidia CDI profile.";
+      };
+
+      mount-nvidia-executables = lib.mkOption {
+        default = true;
+        type = lib.types.bool;
+        description = ''
+          Mount executables nvidia-smi, nvidia-cuda-mps-control, nvidia-cuda-mps-server,
+          nvidia-debugdump, nvidia-powerd and nvidia-ctk on containers.
+        '';
+      };
+
+      mount-nvidia-docker-1-directories = lib.mkOption {
+        default = true;
+        type = lib.types.bool;
+        description = ''
+          Mount nvidia-docker-1 directories on containers: /usr/local/nvidia/lib and
+          /usr/local/nvidia/lib64.
+        '';
+      };
+    };
+
+  };
+
+  config = {
+
+    hardware.nvidia-container-toolkit.mounts = let
+      nvidia-driver = config.hardware.nvidia.package;
+    in (lib.mkMerge [
+      [{ hostPath = pkgs.addDriverRunpath.driverLink;
+         containerPath = pkgs.addDriverRunpath.driverLink; }
+       { hostPath = "${lib.getLib pkgs.glibc}/lib";
+         containerPath = "${lib.getLib pkgs.glibc}/lib"; }
+       { hostPath = "${lib.getLib pkgs.glibc}/lib64";
+         containerPath = "${lib.getLib pkgs.glibc}/lib64"; }]
+      (lib.mkIf config.hardware.nvidia-container-toolkit.mount-nvidia-executables
+        [{ hostPath = lib.getExe' nvidia-driver "nvidia-cuda-mps-control";
+           containerPath = "/usr/bin/nvidia-cuda-mps-control"; }
+         { hostPath = lib.getExe' nvidia-driver "nvidia-cuda-mps-server";
+           containerPath = "/usr/bin/nvidia-cuda-mps-server"; }
+         { hostPath = lib.getExe' nvidia-driver "nvidia-debugdump";
+           containerPath = "/usr/bin/nvidia-debugdump"; }
+         { hostPath = lib.getExe' nvidia-driver "nvidia-powerd";
+           containerPath = "/usr/bin/nvidia-powerd"; }
+         { hostPath = lib.getExe' nvidia-driver "nvidia-smi";
+           containerPath = "/usr/bin/nvidia-smi"; }])
+      # nvidia-docker 1.0 uses /usr/local/nvidia/lib{,64}
+      #   e.g.
+      #     - https://gitlab.com/nvidia/container-images/cuda/-/blob/e3ff10eab3a1424fe394899df0e0f8ca5a410f0f/dist/12.3.1/ubi9/base/Dockerfile#L44
+      #     - https://github.com/NVIDIA/nvidia-docker/blob/01d2c9436620d7dde4672e414698afe6da4a282f/src/nvidia/volumes.go#L104-L173
+      (lib.mkIf config.hardware.nvidia-container-toolkit.mount-nvidia-docker-1-directories
+        [{ hostPath = "${lib.getLib nvidia-driver}/lib";
+           containerPath = "/usr/local/nvidia/lib"; }
+         { hostPath = "${lib.getLib nvidia-driver}/lib";
+           containerPath = "/usr/local/nvidia/lib64"; }])
+    ]);
+
+    systemd.services.nvidia-container-toolkit-cdi-generator = lib.mkIf config.hardware.nvidia-container-toolkit.enable {
+      description = "Container Device Interface (CDI) for Nvidia generator";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "systemd-udev-settle.service" ];
+      serviceConfig = {
+        RuntimeDirectory = "cdi";
+        RemainAfterExit = true;
+        ExecStart =
+          let
+            script = pkgs.callPackage ./cdi-generate.nix {
+              inherit (config.hardware.nvidia-container-toolkit) mounts;
+              nvidia-driver = config.hardware.nvidia.package;
+            };
+          in
+          lib.getExe script;
+        Type = "oneshot";
+      };
+    };
+
+  };
+
+}
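The new module replaces the renamed `virtualisation.containers.cdi.dynamic.nvidia.enable` option and exposes the mount list as a proper option. A minimal sketch of how it might be used from a host configuration; the extra mount is purely hypothetical and only illustrates the `hostPath`/`containerPath`/`mountOptions` submodule shape:

```nix
{
  hardware.nvidia-container-toolkit = {
    # Replaces the old virtualisation.containers.cdi.dynamic.nvidia.enable option.
    enable = true;

    # Hypothetical extra mount added to every container under the Nvidia CDI profile.
    mounts = [
      {
        hostPath = "/var/lib/nvidia/extra";      # assumed example path
        containerPath = "/var/lib/nvidia/extra";
        mountOptions = [ "ro" "nosuid" "nodev" "bind" ];
      }
    ];
  };
}
```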
diff --git a/nixos/modules/services/mail/roundcube.nix b/nixos/modules/services/mail/roundcube.nix
index 4499532ace897..dfbdff7fb0113 100644
--- a/nixos/modules/services/mail/roundcube.nix
+++ b/nixos/modules/services/mail/roundcube.nix
@@ -7,7 +7,7 @@ let
   fpm = config.services.phpfpm.pools.roundcube;
   localDB = cfg.database.host == "localhost";
   user = cfg.database.username;
-  phpWithPspell = pkgs.php81.withExtensions ({ enabled, all }: [ all.pspell ] ++ enabled);
+  phpWithPspell = pkgs.php83.withExtensions ({ enabled, all }: [ all.pspell ] ++ enabled);
 in
 {
   options.services.roundcube = {
diff --git a/nixos/modules/services/misc/greenclip.nix b/nixos/modules/services/misc/greenclip.nix
index 9d1483a5a047a..d92cd1854877f 100644
--- a/nixos/modules/services/misc/greenclip.nix
+++ b/nixos/modules/services/misc/greenclip.nix
@@ -18,7 +18,10 @@ in {
       description = "greenclip daemon";
       wantedBy = [ "graphical-session.target" ];
       after    = [ "graphical-session.target" ];
-      serviceConfig.ExecStart = "${cfg.package}/bin/greenclip daemon";
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/greenclip daemon";
+        Restart = "always";
+      };
     };
 
     environment.systemPackages = [ cfg.package ];
diff --git a/nixos/modules/services/misc/ollama.nix b/nixos/modules/services/misc/ollama.nix
index 948c8f17f9894..c0341984aa351 100644
--- a/nixos/modules/services/misc/ollama.nix
+++ b/nixos/modules/services/misc/ollama.nix
@@ -21,6 +21,8 @@ in
         example = "/home/foo";
         description = ''
           The home directory that the ollama service is started in.
+
+          See also `services.ollama.writablePaths` and `services.ollama.sandbox`.
         '';
       };
       models = lib.mkOption {
@@ -29,6 +31,37 @@ in
         example = "/path/to/ollama/models";
         description = ''
           The directory that the ollama service will read models from and download new models to.
+
+          See also `services.ollama.writablePaths` and `services.ollama.sandbox`
+          if downloading models or other mutation of the filesystem is required.
+        '';
+      };
+      sandbox = lib.mkOption {
+        type = types.bool;
+        default = true;
+        example = false;
+        description = ''
+          Whether to enable systemd's sandboxing capabilities.
+
+          This sets [`DynamicUser`](
+          https://www.freedesktop.org/software/systemd/man/latest/systemd.exec.html#DynamicUser=
+          ), which runs the server as a unique user with read-only access to most of the filesystem.
+
+          See also `services.ollama.writablePaths`.
+        '';
+      };
+      writablePaths = lib.mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "/home/foo" "/mnt/foo" ];
+        description = ''
+          Paths that the server should have write access to.
+
+          This sets [`ReadWritePaths`](
+          https://www.freedesktop.org/software/systemd/man/latest/systemd.exec.html#ReadWritePaths=
+          ), which allows specified paths to be written to through the default sandboxing.
+
+          See also `services.ollama.sandbox`.
         '';
       };
       listenAddress = lib.mkOption {
@@ -59,8 +92,8 @@ in
         type = types.attrsOf types.str;
         default = { };
         example = {
-          HOME = "/tmp";
           OLLAMA_LLM_LIBRARY = "cpu";
+          HIP_VISIBLE_DEVICES = "0,1";
         };
         description = ''
           Set arbitrary environment variables for the ollama service.
@@ -87,7 +120,8 @@ in
         ExecStart = "${lib.getExe ollamaPackage} serve";
         WorkingDirectory = cfg.home;
         StateDirectory = [ "ollama" ];
-        DynamicUser = true;
+        DynamicUser = cfg.sandbox;
+        ReadWritePaths = cfg.writablePaths;
       };
     };
 
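The new `sandbox` and `writablePaths` options work together: `DynamicUser` sandboxing stays enabled while selected paths become writable through `ReadWritePaths`. A minimal sketch under assumed paths (the `/srv/ollama` directory is only an example):

```nix
{
  services.ollama = {
    enable = true;
    # Keep DynamicUser sandboxing enabled but allow writes to the models directory.
    sandbox = true;
    models = "/srv/ollama/models";            # assumed example path
    writablePaths = [ "/srv/ollama/models" ]; # mapped to systemd's ReadWritePaths=
  };
}
```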
diff --git a/nixos/modules/services/networking/deconz.nix b/nixos/modules/services/networking/deconz.nix
index eaa7759d0407c..88b0ee612d871 100644
--- a/nixos/modules/services/networking/deconz.nix
+++ b/nixos/modules/services/networking/deconz.nix
@@ -95,7 +95,7 @@ in
       '';
       postStart = ''
         # Delay signalling service readiness until it's actually up.
-        while ! "${lib.getExe pkgs.curl}" -sSfl -o /dev/null "http://${cfg.listenAddress}:${toString cfg.httpPort}"; do
+        while ! "${lib.getExe pkgs.curl}" -sSfL -o /dev/null "http://${cfg.listenAddress}:${toString cfg.httpPort}"; do
             echo "Waiting for TCP port ${toString cfg.httpPort} to be open..."
             sleep 1
         done
diff --git a/nixos/modules/services/networking/firewall-nftables.nix b/nixos/modules/services/networking/firewall-nftables.nix
index de336113843ef..a5ee7efc3c324 100644
--- a/nixos/modules/services/networking/firewall-nftables.nix
+++ b/nixos/modules/services/networking/firewall-nftables.nix
@@ -45,6 +45,18 @@ in
           This option only works with the nftables based firewall.
         '';
       };
+
+      extraReversePathFilterRules = mkOption {
+        type = types.lines;
+        default = "";
+        example = "fib daddr . mark . iif type local accept";
+        description = ''
+          Additional nftables rules to be appended to the rpfilter-allow
+          chain.
+
+          This option only works with the nftables based firewall.
+        '';
+      };
     };
 
   };
@@ -79,6 +91,8 @@ in
             meta nfproto ipv4 udp sport . udp dport { 67 . 68, 68 . 67 } accept comment "DHCPv4 client/server"
             fib saddr . mark ${optionalString (cfg.checkReversePath != "loose") ". iif"} oif exists accept
 
+            jump rpfilter-allow
+
             ${optionalString cfg.logReversePathDrops ''
               log level info prefix "rpfilter drop: "
             ''}
@@ -86,6 +100,10 @@ in
           }
         ''}
 
+        chain rpfilter-allow {
+          ${cfg.extraReversePathFilterRules}
+        }
+
         chain input {
           type filter hook input priority filter; policy drop;
 
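A sketch of how the new `extraReversePathFilterRules` hook might be used, reusing the example rule from the option itself; it only takes effect with the nftables-based firewall:

```nix
{
  networking.nftables.enable = true;

  networking.firewall = {
    enable = true;
    checkReversePath = true;
    # Appended to the new rpfilter-allow chain.
    extraReversePathFilterRules = ''
      fib daddr . mark . iif type local accept
    '';
  };
}
```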
diff --git a/nixos/modules/services/networking/inadyn.nix b/nixos/modules/services/networking/inadyn.nix
new file mode 100644
index 0000000000000..baa4302096c2c
--- /dev/null
+++ b/nixos/modules/services/networking/inadyn.nix
@@ -0,0 +1,250 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.inadyn;
+
+  # check if a value of an attrset is not null or an empty collection
+  nonEmptyValue = _: v: v != null && v != [ ] && v != { };
+
+  renderOption = k: v:
+    if builtins.elem k [ "provider" "custom" ] then
+      lib.concatStringsSep "\n"
+        (mapAttrsToList
+          (name: config: ''
+            ${k} ${name} {
+                ${lib.concatStringsSep "\n    " (mapAttrsToList renderOption (filterAttrs nonEmptyValue config))}
+            }'')
+          v)
+    else if k == "include" then
+      "${k}(\"${v}\")"
+    else if k == "hostname" && builtins.isList v then
+      "${k} = { ${builtins.concatStringsSep ", " (map (s: "\"${s}\"") v)} }"
+    else if builtins.isBool v then
+      "${k} = ${boolToString v}"
+    else if builtins.isString v then
+      "${k} = \"${v}\""
+    else
+      "${k} = ${toString v}";
+
+  configFile' = pkgs.writeText "inadyn.conf"
+    ''
+      # This file was generated by nix
+      # do not edit
+
+      ${(lib.concatStringsSep "\n" (mapAttrsToList renderOption (filterAttrs nonEmptyValue cfg.settings)))}
+    '';
+
+  configFile = if (cfg.configFile != null) then cfg.configFile else configFile';
+in
+{
+  options.services.inadyn = with types;
+    let
+      providerOptions =
+        {
+          include = mkOption {
+            default = null;
+            description = "File to include additional settings for this provider from.";
+            type = nullOr path;
+          };
+          ssl = mkOption {
+            default = true;
+            description = "Whether to use HTTPS for this DDNS provider.";
+            type = bool;
+          };
+          username = mkOption {
+            default = null;
+            description = "Username for this DDNS provider.";
+            type = nullOr str;
+          };
+          password = mkOption {
+            default = null;
+            description = ''
+              Password for this DDNS provider.
+
+              WARNING: This will be world-readable in the nix store.
+              To store credentials securely, use the `include` or `configFile` options.
+            '';
+            type = nullOr str;
+          };
+          hostname = mkOption {
+            default = "*";
+            example = "your.cool-domain.com";
+            description = "Hostname alias(es).";
+            type = either str (listOf str);
+          };
+        };
+    in
+    {
+      enable = mkEnableOption (''
+        synchronising your machine's IP address with a dynamic DNS provider using inadyn
+      '');
+      user = mkOption {
+        default = "inadyn";
+        type = types.str;
+        description = ''
+          User account under which inadyn runs.
+
+          ::: {.note}
+          If left as the default value, this user will automatically be created
+          on system activation; otherwise you are responsible for
+          ensuring the user exists before the inadyn service starts.
+          :::
+        '';
+      };
+      group = mkOption {
+        default = "inadyn";
+        type = types.str;
+        description = ''
+          Group account under which inadyn runs.
+
+          ::: {.note}
+          If left as the default value, this group will automatically be created
+          on system activation; otherwise you are responsible for
+          ensuring the group exists before the inadyn service starts.
+          :::
+        '';
+      };
+      interval = mkOption {
+        default = "*-*-* *:*:00";
+        description = ''
+          How often to check the current IP.
+          Uses the format described in {manpage}`systemd.time(7)`.
+        '';
+        type = str;
+      };
+      logLevel = lib.mkOption {
+        type = lib.types.enum [ "none" "err" "warning" "info" "notice" "debug" ];
+        default = "notice";
+        description = "Set inadyn's log level.";
+      };
+      settings = mkOption {
+        default = { };
+        description = "See `inadyn.conf (5)`";
+        type = submodule {
+          freeformType = attrs;
+          options = {
+            allow-ipv6 = mkOption {
+              default = config.networking.enableIPv6;
+              defaultText = "`config.networking.enableIPv6`";
+              description = "Whether to get IPv6 addresses from interfaces.";
+              type = bool;
+            };
+            forced-update = mkOption {
+              default = 2592000;
+              description = "Duration (in seconds) after which an update is forced.";
+              type = ints.positive;
+            };
+            provider = mkOption {
+              default = { };
+              description = ''
+                Settings for DDNS providers built-in to inadyn.
+
+                For a list of built-in providers, see `inadyn.conf (5)`.
+              '';
+              type = attrsOf (submodule {
+                freeformType = attrs;
+                options = providerOptions;
+              });
+            };
+            custom = mkOption {
+              default = { };
+              description = ''
+                Settings for custom DNS providers.
+              '';
+              type = attrsOf (submodule {
+                freeformType = attrs;
+                options = providerOptions // {
+                  ddns-server = mkOption {
+                    description = "DDNS server name.";
+                    type = str;
+                  };
+                  ddns-path = mkOption {
+                    description = ''
+                      DDNS server path.
+
+                      See `inadyn.conf (5)` for a list of format specifiers that can be used.
+                    '';
+                    example = "/update?user=%u&password=%p&domain=%h&myip=%i";
+                    type = str;
+                  };
+                };
+              });
+            };
+          };
+        };
+      };
+      configFile = mkOption {
+        default = null;
+        description = ''
+          Configuration file for inadyn.
+
+          Setting this will override all other configuration options.
+
+          Passed to the inadyn service using LoadCredential.
+        '';
+        type = nullOr path;
+      };
+    };
+
+  config = lib.mkIf cfg.enable {
+    systemd = {
+      services.inadyn = {
+        description = "Update nameservers using inadyn";
+        documentation = [
+          "man:inadyn"
+          "man:inadyn.conf"
+          "file:${pkgs.inadyn}/share/doc/inadyn/README.md"
+        ];
+        requires = [ "network-online.target" ];
+        wantedBy = [ "multi-user.target" ];
+        startAt = cfg.interval;
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = ''${lib.getExe pkgs.inadyn} -f ${configFile} --cache-dir ''${CACHE_DIRECTORY}/inadyn -1 --foreground -l ${cfg.logLevel}'';
+          LoadCredential = "config:${configFile}";
+          CacheDirectory = "inadyn";
+
+          User = cfg.user;
+          Group = cfg.group;
+          UMask = "0177";
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          RestrictAddressFamilies = "AF_INET AF_INET6 AF_NETLINK";
+          NoNewPrivileges = true;
+          PrivateDevices = true;
+          PrivateTmp = true;
+          PrivateUsers = true;
+          ProtectSystem = "strict";
+          ProtectProc = "invisible";
+          ProtectHome = true;
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          SystemCallArchitectures = "native";
+          SystemCallErrorNumber = "EPERM";
+          SystemCallFilter = "@system-service";
+          CapabilityBoundingSet = "";
+        };
+      };
+
+      timers.inadyn.timerConfig.Persistent = true;
+    };
+
+    users.users.inadyn = mkIf (cfg.user == "inadyn") {
+      group = cfg.group;
+      isSystemUser = true;
+    };
+
+    users.groups = mkIf (cfg.group == "inadyn") {
+      inadyn = { };
+    };
+  };
+}
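A sketch of a `services.inadyn` configuration for a built-in provider. The provider name, hostname and secrets path are assumptions, and the `include` file keeps the credentials out of the world-readable store, as the `password` option warns:

```nix
{
  services.inadyn = {
    enable = true;
    settings = {
      allow-ipv6 = false;
      provider."default@duckdns.org" = {         # assumed provider name, see inadyn.conf(5)
        hostname = "example.duckdns.org";        # assumed example hostname
        # Credentials are read at runtime from a root-owned file instead of the Nix store.
        include = "/run/secrets/inadyn-duckdns"; # assumed example path
      };
    };
  };
}
```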
diff --git a/nixos/modules/services/networking/netbird/coturn.nix b/nixos/modules/services/networking/netbird/coturn.nix
new file mode 100644
index 0000000000000..dd032abb2d75e
--- /dev/null
+++ b/nixos/modules/services/networking/netbird/coturn.nix
@@ -0,0 +1,160 @@
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
+
+let
+  inherit (lib)
+    getExe
+    literalExpression
+    mkAfter
+    mkEnableOption
+    mkIf
+    mkMerge
+    mkOption
+    optionalAttrs
+    optionalString
+    ;
+
+  inherit (lib.types)
+    bool
+    listOf
+    nullOr
+    path
+    port
+    str
+    ;
+
+  cfg = config.services.netbird.server.coturn;
+in
+
+{
+  options.services.netbird.server.coturn = {
+    enable = mkEnableOption "a Coturn server for Netbird, will also open the firewall on the configured range";
+
+    useAcmeCertificates = mkOption {
+      type = bool;
+      default = false;
+      description = ''
+        Whether to use ACME certificates corresponding to the given domain for the server.
+      '';
+    };
+
+    domain = mkOption {
+      type = str;
+      description = "The domain under which the coturn server runs.";
+    };
+
+    user = mkOption {
+      type = str;
+      default = "netbird";
+      description = ''
+        The username used by netbird to connect to the coturn server.
+      '';
+    };
+
+    password = mkOption {
+      type = nullOr str;
+      default = null;
+      description = ''
+        The password of the user used by netbird to connect to the coturn server.
+      '';
+    };
+
+    passwordFile = mkOption {
+      type = nullOr path;
+      default = null;
+      description = ''
+        The path to a file containing the password of the user used by netbird to connect to the coturn server.
+      '';
+    };
+
+    openPorts = mkOption {
+      type = listOf port;
+      default = with config.services.coturn; [
+        listening-port
+        alt-listening-port
+        tls-listening-port
+        alt-tls-listening-port
+      ];
+      defaultText = literalExpression ''
+        with config.services.coturn; [
+          listening-port
+          alt-listening-port
+          tls-listening-port
+          alt-tls-listening-port
+        ];
+      '';
+
+      description = ''
+        The list of ports used by coturn for listening to open in the firewall.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable (mkMerge [
+    {
+      assertions = [
+        {
+          assertion = (cfg.password == null) != (cfg.passwordFile == null);
+          message = "Exactly one of `password` or `passwordFile` must be given for the coturn setup.";
+        }
+      ];
+
+      services.coturn =
+        {
+          enable = true;
+
+          realm = cfg.domain;
+          lt-cred-mech = true;
+          no-cli = true;
+
+          extraConfig = ''
+            fingerprint
+            user=${cfg.user}:${if cfg.password != null then cfg.password else "@password@"}
+            no-software-attribute
+          '';
+        }
+        // (optionalAttrs cfg.useAcmeCertificates {
+          cert = "@cert@";
+          pkey = "@pkey@";
+        });
+
+      systemd.services.coturn =
+        let
+          dir = config.security.acme.certs.${cfg.domain}.directory;
+          preStart' =
+            (optionalString (cfg.passwordFile != null) ''
+              ${getExe pkgs.replace-secret} @password@ ${cfg.passwordFile} /run/coturn/turnserver.cfg
+            '')
+            + (optionalString cfg.useAcmeCertificates ''
+              ${getExe pkgs.replace-secret} @cert@ "$CREDENTIALS_DIRECTORY/cert.pem" /run/coturn/turnserver.cfg
+              ${getExe pkgs.replace-secret} @pkey@ "$CREDENTIALS_DIRECTORY/pkey.pem" /run/coturn/turnserver.cfg
+            '');
+        in
+        (optionalAttrs (preStart' != "") { preStart = mkAfter preStart'; })
+        // (optionalAttrs cfg.useAcmeCertificates {
+          serviceConfig.LoadCredential = [
+            "cert.pem:${dir}/fullchain.pem"
+            "pkey.pem:${dir}/key.pem"
+          ];
+        });
+
+      security.acme.certs.${cfg.domain}.postRun = optionalString cfg.useAcmeCertificates "systemctl restart coturn.service";
+
+      networking.firewall = {
+        allowedUDPPorts = cfg.openPorts;
+        allowedTCPPorts = cfg.openPorts;
+
+        allowedUDPPortRanges = [
+          {
+            from = config.services.coturn.min-port;
+            to = config.services.coturn.max-port;
+          }
+        ];
+      };
+    }
+  ]);
+}
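A sketch of enabling the bundled Coturn for Netbird, with the password supplied from a file so it never lands in the store; the domain and path are placeholders:

```nix
{
  services.netbird.server.coturn = {
    enable = true;
    domain = "netbird.example.selfhosted";        # placeholder domain
    useAcmeCertificates = true;
    # Exactly one of password or passwordFile must be set.
    passwordFile = "/run/secrets/netbird-coturn"; # placeholder path
  };
}
```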
diff --git a/nixos/modules/services/networking/netbird/dashboard.nix b/nixos/modules/services/networking/netbird/dashboard.nix
new file mode 100644
index 0000000000000..6fc3086155900
--- /dev/null
+++ b/nixos/modules/services/networking/netbird/dashboard.nix
@@ -0,0 +1,186 @@
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
+
+let
+  inherit (lib)
+    boolToString
+    concatStringsSep
+    hasAttr
+    isBool
+    mapAttrs
+    mkDefault
+    mkEnableOption
+    mkIf
+    mkOption
+    mkPackageOption
+    ;
+
+  inherit (lib.types)
+    attrsOf
+    bool
+    either
+    package
+    str
+    submodule
+    ;
+
+  toStringEnv = value: if isBool value then boolToString value else toString value;
+
+  cfg = config.services.netbird.server.dashboard;
+in
+
+{
+  options.services.netbird.server.dashboard = {
+    enable = mkEnableOption "the static netbird dashboard frontend";
+
+    package = mkPackageOption pkgs "netbird-dashboard" { };
+
+    enableNginx = mkEnableOption "Nginx reverse-proxy to serve the dashboard";
+
+    domain = mkOption {
+      type = str;
+      default = "localhost";
+      description = "The domain under which the dashboard runs.";
+    };
+
+    managementServer = mkOption {
+      type = str;
+      description = "The address of the management server, used for the API endpoints.";
+    };
+
+    settings = mkOption {
+      type = submodule { freeformType = attrsOf (either str bool); };
+
+      defaultText = ''
+        {
+          AUTH_AUDIENCE = "netbird";
+          AUTH_CLIENT_ID = "netbird";
+          AUTH_SUPPORTED_SCOPES = "openid profile email";
+          NETBIRD_TOKEN_SOURCE = "idToken";
+          USE_AUTH0 = false;
+        }
+      '';
+
+      description = ''
+        An attribute set that will be used to substitute variables when building the dashboard.
+        Any values set here will be templated into the frontend and will be public for anyone who can reach your website.
+        The exact values sadly aren't documented anywhere.
+        A starting point when searching for valid values is this [script](https://github.com/netbirdio/dashboard/blob/main/docker/init_react_envs.sh).
+        The only mandatory value is 'AUTH_AUTHORITY', as we cannot set a default value here.
+      '';
+    };
+
+    finalDrv = mkOption {
+      readOnly = true;
+      type = package;
+      description = ''
+        The derivation containing the final templated dashboard.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = hasAttr "AUTH_AUTHORITY" cfg.settings;
+        message = "The setting AUTH_AUTHORITY is required for the dasboard to function.";
+      }
+    ];
+
+    services.netbird.server.dashboard = {
+      settings =
+        {
+          # Due to how the backend and frontend work, this secret will be templated into the frontend
+          # and then served statically from your website.
+          # This enables you to log in without the normally needed indirection through the backend,
+          # but it also means anyone who can reach your website can fetch this secret,
+          # which is why there is no real need to put it into special options: it is public anyway.
+          # As far as I know, leaking this secret only amounts to an information leak,
+          # as one can fetch some basic app information from the IdP.
+          # To actually do something, one still needs valid login data in addition to this secret,
+          # so this being public does not suffice for anything; it merely decreases security.
+          AUTH_CLIENT_SECRET = "";
+
+          NETBIRD_MGMT_API_ENDPOINT = cfg.managementServer;
+          NETBIRD_MGMT_GRPC_API_ENDPOINT = cfg.managementServer;
+        }
+        // (mapAttrs (_: mkDefault) {
+          # Those values have to be easily overridable
+          AUTH_AUDIENCE = "netbird"; # must be set for your devices to be able to log in
+          AUTH_CLIENT_ID = "netbird";
+          AUTH_SUPPORTED_SCOPES = "openid profile email";
+          NETBIRD_TOKEN_SOURCE = "idToken";
+          USE_AUTH0 = false;
+        });
+
+      # The derivation containing the templated dashboard
+      finalDrv =
+        pkgs.runCommand "netbird-dashboard"
+          {
+            nativeBuildInputs = [ pkgs.gettext ];
+            env = {
+              ENV_STR = concatStringsSep " " [
+                "$AUTH_AUDIENCE"
+                "$AUTH_AUTHORITY"
+                "$AUTH_CLIENT_ID"
+                "$AUTH_CLIENT_SECRET"
+                "$AUTH_REDIRECT_URI"
+                "$AUTH_SILENT_REDIRECT_URI"
+                "$AUTH_SUPPORTED_SCOPES"
+                "$NETBIRD_DRAG_QUERY_PARAMS"
+                "$NETBIRD_GOOGLE_ANALYTICS_ID"
+                "$NETBIRD_HOTJAR_TRACK_ID"
+                "$NETBIRD_MGMT_API_ENDPOINT"
+                "$NETBIRD_MGMT_GRPC_API_ENDPOINT"
+                "$NETBIRD_TOKEN_SOURCE"
+                "$USE_AUTH0"
+              ];
+            } // (mapAttrs (_: toStringEnv) cfg.settings);
+          }
+          ''
+            cp -R ${cfg.package} build
+
+            find build -type d -exec chmod 755 {} \;
+            OIDC_TRUSTED_DOMAINS="build/OidcTrustedDomains.js"
+
+            envsubst "$ENV_STR" < "$OIDC_TRUSTED_DOMAINS.tmpl" > "$OIDC_TRUSTED_DOMAINS"
+
+            for f in $(grep -R -l AUTH_SUPPORTED_SCOPES build/); do
+              mv "$f" "$f.copy"
+              envsubst "$ENV_STR" < "$f.copy" > "$f"
+              rm "$f.copy"
+            done
+
+            cp -R build $out
+          '';
+    };
+
+    services.nginx = mkIf cfg.enableNginx {
+      enable = true;
+
+      virtualHosts.${cfg.domain} = {
+        locations = {
+          "/" = {
+            root = cfg.finalDrv;
+            tryFiles = "$uri $uri.html $uri/ =404";
+          };
+
+          "/404.html".extraConfig = ''
+            internal;
+          '';
+        };
+
+        extraConfig = ''
+          error_page 404 /404.html;
+        '';
+      };
+    };
+  };
+}
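A sketch of the dashboard options; `AUTH_AUTHORITY` is the one setting the module asserts on, and every domain and URL below is a placeholder:

```nix
{
  services.netbird.server.dashboard = {
    enable = true;
    enableNginx = true;
    domain = "netbird.example.selfhosted";                   # placeholder domain
    managementServer = "https://netbird.example.selfhosted"; # placeholder URL
    settings = {
      # Mandatory: the module asserts that AUTH_AUTHORITY is present.
      AUTH_AUTHORITY = "https://sso.example.selfhosted/oauth2/openid/netbird";
    };
  };
}
```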
diff --git a/nixos/modules/services/networking/netbird/management.nix b/nixos/modules/services/networking/netbird/management.nix
new file mode 100644
index 0000000000000..52f033959143c
--- /dev/null
+++ b/nixos/modules/services/networking/netbird/management.nix
@@ -0,0 +1,460 @@
+{
+  config,
+  lib,
+  pkgs,
+  utils,
+  ...
+}:
+
+let
+  inherit (lib)
+    any
+    concatMap
+    getExe'
+    literalExpression
+    mkEnableOption
+    mkIf
+    mkOption
+    mkPackageOption
+    optional
+    recursiveUpdate
+    ;
+
+  inherit (lib.types)
+    bool
+    enum
+    listOf
+    port
+    str
+    ;
+
+  inherit (utils) escapeSystemdExecArgs genJqSecretsReplacementSnippet;
+
+  stateDir = "/var/lib/netbird-mgmt";
+
+  settingsFormat = pkgs.formats.json { };
+
+  defaultSettings = {
+    Stuns = [
+      {
+        Proto = "udp";
+        URI = "stun:${cfg.turnDomain}:3478";
+        Username = "";
+        Password = null;
+      }
+    ];
+
+    TURNConfig = {
+      Turns = [
+        {
+          Proto = "udp";
+          URI = "turn:${cfg.turnDomain}:${builtins.toString cfg.turnPort}";
+          Username = "netbird";
+          Password = "netbird";
+        }
+      ];
+
+      CredentialsTTL = "12h";
+      Secret = "not-secure-secret";
+      TimeBasedCredentials = false;
+    };
+
+    Signal = {
+      Proto = "https";
+      URI = "${cfg.domain}:443";
+      Username = "";
+      Password = null;
+    };
+
+    ReverseProxy = {
+      TrustedHTTPProxies = [ ];
+      TrustedHTTPProxiesCount = 0;
+      TrustedPeers = [ "0.0.0.0/0" ];
+    };
+
+    Datadir = "${stateDir}/data";
+    DataStoreEncryptionKey = "very-insecure-key";
+    StoreConfig = {
+      Engine = "sqlite";
+    };
+
+    HttpConfig = {
+      Address = "127.0.0.1:${builtins.toString cfg.port}";
+      IdpSignKeyRefreshEnabled = true;
+      OIDCConfigEndpoint = cfg.oidcConfigEndpoint;
+    };
+
+    IdpManagerConfig = {
+      ManagerType = "none";
+      ClientConfig = {
+        Issuer = "";
+        TokenEndpoint = "";
+        ClientID = "netbird";
+        ClientSecret = "";
+        GrantType = "client_credentials";
+      };
+
+      ExtraConfig = { };
+      Auth0ClientCredentials = null;
+      AzureClientCredentials = null;
+      KeycloakClientCredentials = null;
+      ZitadelClientCredentials = null;
+    };
+
+    DeviceAuthorizationFlow = {
+      Provider = "none";
+      ProviderConfig = {
+        Audience = "netbird";
+        Domain = null;
+        ClientID = "netbird";
+        TokenEndpoint = null;
+        DeviceAuthEndpoint = "";
+        Scope = "openid profile email";
+        UseIDToken = false;
+      };
+    };
+
+    PKCEAuthorizationFlow = {
+      ProviderConfig = {
+        Audience = "netbird";
+        ClientID = "netbird";
+        ClientSecret = "";
+        AuthorizationEndpoint = "";
+        TokenEndpoint = "";
+        Scope = "openid profile email";
+        RedirectURLs = [ "http://localhost:53000" ];
+        UseIDToken = false;
+      };
+    };
+  };
+
+  managementConfig = recursiveUpdate defaultSettings cfg.settings;
+
+  managementFile = settingsFormat.generate "config.json" managementConfig;
+
+  cfg = config.services.netbird.server.management;
+in
+
+{
+  options.services.netbird.server.management = {
+    enable = mkEnableOption "Netbird Management Service.";
+
+    package = mkPackageOption pkgs "netbird" { };
+
+    domain = mkOption {
+      type = str;
+      description = "The domain under which the management API runs.";
+    };
+
+    turnDomain = mkOption {
+      type = str;
+      description = "The domain of the TURN server to use.";
+    };
+
+    turnPort = mkOption {
+      type = port;
+      default = 3478;
+      description = ''
+        The port of the TURN server to use.
+      '';
+    };
+
+    dnsDomain = mkOption {
+      type = str;
+      default = "netbird.selfhosted";
+      description = "Domain used for peer resolution.";
+    };
+
+    singleAccountModeDomain = mkOption {
+      type = str;
+      default = "netbird.selfhosted";
+      description = ''
+        Enables single account mode.
+        This means that all the users will be under the same account grouped by the specified domain.
+        If the installation has more than one account, the property is ineffective.
+      '';
+    };
+
+    disableAnonymousMetrics = mkOption {
+      type = bool;
+      default = true;
+      description = "Disables push of anonymous usage metrics to NetBird.";
+    };
+
+    disableSingleAccountMode = mkOption {
+      type = bool;
+      default = false;
+      description = ''
+        If set to true, disables single account mode.
+        The `singleAccountModeDomain` property will be ignored and every new user will have a separate NetBird account.
+      '';
+    };
+
+    port = mkOption {
+      type = port;
+      default = 8011;
+      description = "Internal port of the management server.";
+    };
+
+    extraOptions = mkOption {
+      type = listOf str;
+      default = [ ];
+      description = ''
+        Additional options given to netbird-mgmt as commandline arguments.
+      '';
+    };
+
+    oidcConfigEndpoint = mkOption {
+      type = str;
+      description = "The oidc discovery endpoint.";
+      example = "https://example.eu.auth0.com/.well-known/openid-configuration";
+    };
+
+    settings = mkOption {
+      inherit (settingsFormat) type;
+
+      defaultText = literalExpression ''
+        defaultSettings = {
+          Stuns = [
+            {
+              Proto = "udp";
+              URI = "stun:''${cfg.turnDomain}:3478";
+              Username = "";
+              Password = null;
+            }
+          ];
+
+          TURNConfig = {
+            Turns = [
+              {
+                Proto = "udp";
+                URI = "turn:''${cfg.turnDomain}:3478";
+                Username = "netbird";
+                Password = "netbird";
+              }
+            ];
+
+            CredentialsTTL = "12h";
+            Secret = "not-secure-secret";
+            TimeBasedCredentials = false;
+          };
+
+          Signal = {
+            Proto = "https";
+            URI = "''${cfg.domain}:443";
+            Username = "";
+            Password = null;
+          };
+
+          ReverseProxy = {
+            TrustedHTTPProxies = [ ];
+            TrustedHTTPProxiesCount = 0;
+            TrustedPeers = [ "0.0.0.0/0" ];
+          };
+
+          Datadir = "''${stateDir}/data";
+          DataStoreEncryptionKey = "genEVP6j/Yp2EeVujm0zgqXrRos29dQkpvX0hHdEUlQ=";
+          StoreConfig = { Engine = "sqlite"; };
+
+          HttpConfig = {
+            Address = "127.0.0.1:''${builtins.toString cfg.port}";
+            IdpSignKeyRefreshEnabled = true;
+            OIDCConfigEndpoint = cfg.oidcConfigEndpoint;
+          };
+
+          IdpManagerConfig = {
+            ManagerType = "none";
+            ClientConfig = {
+              Issuer = "";
+              TokenEndpoint = "";
+              ClientID = "netbird";
+              ClientSecret = "";
+              GrantType = "client_credentials";
+            };
+
+            ExtraConfig = { };
+            Auth0ClientCredentials = null;
+            AzureClientCredentials = null;
+            KeycloakClientCredentials = null;
+            ZitadelClientCredentials = null;
+          };
+
+          DeviceAuthorizationFlow = {
+            Provider = "none";
+            ProviderConfig = {
+              Audience = "netbird";
+              Domain = null;
+              ClientID = "netbird";
+              TokenEndpoint = null;
+              DeviceAuthEndpoint = "";
+              Scope = "openid profile email offline_access api";
+              UseIDToken = false;
+            };
+          };
+
+          PKCEAuthorizationFlow = {
+            ProviderConfig = {
+              Audience = "netbird";
+              ClientID = "netbird";
+              ClientSecret = "";
+              AuthorizationEndpoint = "";
+              TokenEndpoint = "";
+              Scope = "openid profile email offline_access api";
+              RedirectURLs = "http://localhost:53000";
+              UseIDToken = false;
+            };
+          };
+        };
+      '';
+
+      default = { };
+
+      description = ''
+        Configuration of the netbird management server.
+        Options containing secret data should be set to an attribute set containing the attribute _secret
+        - a string pointing to a file containing the value the option should be set to.
+        See the example to get a better picture of this: in the resulting management.json file,
+        the `DataStoreEncryptionKey` key will be set to the contents of the /run/agenix/netbird_mgmt-data_store_encryption_key file.
+      '';
+
+      example = {
+        DataStoreEncryptionKey = {
+          _secret = "/run/agenix/netbird_mgmt-data_store_encryption_key";
+        };
+      };
+    };
+
+    logLevel = mkOption {
+      type = enum [
+        "ERROR"
+        "WARN"
+        "INFO"
+        "DEBUG"
+      ];
+      default = "INFO";
+      description = "Log level of the netbird services.";
+    };
+
+    enableNginx = mkEnableOption "Nginx reverse-proxy for the netbird management service";
+  };
+
+  config = mkIf cfg.enable {
+    warnings =
+      concatMap
+        (
+          { check, name }:
+          optional check "${name} is world-readable in the Nix store; you should provide it as a _secret."
+        )
+        [
+          {
+            check = builtins.isString managementConfig.TURNConfig.Secret;
+            name = "The TURNConfig.secret";
+          }
+          {
+            check = builtins.isString managementConfig.DataStoreEncryptionKey;
+            name = "The DataStoreEncryptionKey";
+          }
+          {
+            check = any (T: (T ? Password) && builtins.isString T.Password) managementConfig.TURNConfig.Turns;
+            name = "A Turn configuration's password";
+          }
+        ];
+
+    systemd.services.netbird-management = {
+      description = "The management server for Netbird, a wireguard VPN";
+      documentation = [ "https://netbird.io/docs/" ];
+
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ managementFile ];
+
+      preStart = genJqSecretsReplacementSnippet managementConfig "${stateDir}/management.json";
+
+      serviceConfig = {
+        ExecStart = escapeSystemdExecArgs (
+          [
+            (getExe' cfg.package "netbird-mgmt")
+            "management"
+            # Config file
+            "--config"
+            "${stateDir}/management.json"
+            # Data directory
+            "--datadir"
+            "${stateDir}/data"
+            # DNS domain
+            "--dns-domain"
+            cfg.dnsDomain
+            # Port to listen on
+            "--port"
+            cfg.port
+            # Log to stdout
+            "--log-file"
+            "console"
+            # Log level
+            "--log-level"
+            cfg.logLevel
+            #
+            "--idp-sign-key-refresh-enabled"
+            # Domain for internal resolution
+            "--single-account-mode-domain"
+            cfg.singleAccountModeDomain
+          ]
+          ++ (optional cfg.disableAnonymousMetrics "--disable-anonymous-metrics")
+          ++ (optional cfg.disableSingleAccountMode "--disable-single-account-mode")
+          ++ cfg.extraOptions
+        );
+        Restart = "always";
+        RuntimeDirectory = "netbird-mgmt";
+        StateDirectory = [
+          "netbird-mgmt"
+          "netbird-mgmt/data"
+        ];
+        WorkingDirectory = stateDir;
+
+        # hardening
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = true;
+        RemoveIPC = true;
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+      };
+
+      stopIfChanged = false;
+    };
+
+    services.nginx = mkIf cfg.enableNginx {
+      enable = true;
+
+      virtualHosts.${cfg.domain} = {
+        locations = {
+          "/api".proxyPass = "http://localhost:${builtins.toString cfg.port}";
+
+          "/management.ManagementService/".extraConfig = ''
+            # This is necessary so that grpc connections do not get closed early
+            # see https://stackoverflow.com/a/67805465
+            client_body_timeout 1d;
+
+            grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+
+            grpc_pass grpc://localhost:${builtins.toString cfg.port};
+            grpc_read_timeout 1d;
+            grpc_send_timeout 1d;
+            grpc_socket_keepalive on;
+          '';
+        };
+      };
+    };
+  };
+}
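A sketch of the management service using the `_secret` indirection described in the `settings` option, so the TURN secret and data-store encryption key are substituted from files at start-up; the endpoints and paths are placeholders:

```nix
{
  services.netbird.server.management = {
    enable = true;
    enableNginx = true;
    domain = "netbird.example.selfhosted";     # placeholder domain
    turnDomain = "netbird.example.selfhosted"; # placeholder domain
    oidcConfigEndpoint = "https://sso.example.selfhosted/.well-known/openid-configuration";
    settings = {
      # _secret values are replaced from the given files by the preStart snippet.
      DataStoreEncryptionKey._secret = "/run/secrets/netbird-data-key"; # placeholder path
      TURNConfig.Secret._secret = "/run/secrets/netbird-turn-secret";   # placeholder path
    };
  };
}
```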
diff --git a/nixos/modules/services/networking/netbird/server.md b/nixos/modules/services/networking/netbird/server.md
new file mode 100644
index 0000000000000..3649e97b379e5
--- /dev/null
+++ b/nixos/modules/services/networking/netbird/server.md
@@ -0,0 +1,42 @@
+# Netbird server {#module-services-netbird-server}
+
+NetBird is a VPN built on top of WireGuard®, making it easy to create secure private networks for your organization or home.
+
+## Quickstart {#module-services-netbird-server-quickstart}
+
+To fully set up Netbird as a self-hosted server, we need both a Coturn server and an identity provider; the list of supported SSOs and their setup instructions is available [on Netbird's documentation](https://docs.netbird.io/selfhosted/selfhosted-guide#step-3-configure-identity-provider-idp).
+
+There are quite a few settings that need to be passed to Netbird for it to function, and a minimal config looks like:
+
+```nix
+services.netbird.server = {
+  enable = true;
+
+  domain = "netbird.example.selfhosted";
+
+  enableNginx = true;
+
+  coturn = {
+    enable = true;
+
+    passwordFile = "/path/to/a/secret/password";
+  };
+
+  management = {
+    oidcConfigEndpoint = "https://sso.example.selfhosted/oauth2/openid/netbird/.well-known/openid-configuration";
+
+    settings = {
+      TURNConfig = {
+        Turns = [
+          {
+            Proto = "udp";
+            URI = "turn:netbird.example.selfhosted:3478";
+            Username = "netbird";
+            Password._secret = "/path/to/a/secret/password";
+          }
+        ];
+      };
+    };
+  };
+};
+```
diff --git a/nixos/modules/services/networking/netbird/server.nix b/nixos/modules/services/networking/netbird/server.nix
new file mode 100644
index 0000000000000..a4de0fda6a134
--- /dev/null
+++ b/nixos/modules/services/networking/netbird/server.nix
@@ -0,0 +1,67 @@
+{ config, lib, ... }:
+
+let
+  inherit (lib)
+    mkEnableOption
+    mkIf
+    mkOption
+    optionalAttrs
+    ;
+
+  inherit (lib.types) str;
+
+  cfg = config.services.netbird.server;
+in
+
+{
+  meta = {
+    maintainers = with lib.maintainers; [ thubrecht ];
+    doc = ./server.md;
+  };
+
+  # Import the separate components
+  imports = [
+    ./coturn.nix
+    ./dashboard.nix
+    ./management.nix
+    ./signal.nix
+  ];
+
+  options.services.netbird.server = {
+    enable = mkEnableOption "Netbird Server stack, comprising the dashboard, management API and signal service";
+
+    enableNginx = mkEnableOption "Nginx reverse-proxy for the netbird server services";
+
+    domain = mkOption {
+      type = str;
+      description = "The domain under which the netbird server runs.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.netbird.server = {
+      dashboard = {
+        inherit (cfg) enable domain enableNginx;
+
+        managementServer = "https://${cfg.domain}";
+      };
+
+      management =
+        {
+          inherit (cfg) enable domain enableNginx;
+        }
+        // (optionalAttrs cfg.coturn.enable {
+          turnDomain = cfg.domain;
+          turnPort = config.services.coturn.tls-listening-port;
+        });
+
+      signal = {
+        inherit (cfg) enable domain enableNginx;
+      };
+
+      coturn = {
+        inherit (cfg) domain;
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/networking/netbird/signal.nix b/nixos/modules/services/networking/netbird/signal.nix
new file mode 100644
index 0000000000000..8408d20e874b5
--- /dev/null
+++ b/nixos/modules/services/networking/netbird/signal.nix
@@ -0,0 +1,123 @@
+{
+  config,
+  lib,
+  pkgs,
+  utils,
+  ...
+}:
+
+let
+  inherit (lib)
+    getExe'
+    mkEnableOption
+    mkIf
+    mkPackageOption
+    mkOption
+    ;
+
+  inherit (lib.types) enum port str;
+
+  inherit (utils) escapeSystemdExecArgs;
+
+  cfg = config.services.netbird.server.signal;
+in
+
+{
+  options.services.netbird.server.signal = {
+    enable = mkEnableOption "Netbird's Signal Service";
+
+    package = mkPackageOption pkgs "netbird" { };
+
+    enableNginx = mkEnableOption "Nginx reverse-proxy for the netbird signal service";
+
+    domain = mkOption {
+      type = str;
+      description = "The domain name for the signal service.";
+    };
+
+    port = mkOption {
+      type = port;
+      default = 8012;
+      description = "Internal port of the signal server.";
+    };
+
+    logLevel = mkOption {
+      type = enum [
+        "ERROR"
+        "WARN"
+        "INFO"
+        "DEBUG"
+      ];
+      default = "INFO";
+      description = "Log level of the netbird signal service.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.netbird-signal = {
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = escapeSystemdExecArgs [
+          (getExe' cfg.package "netbird-signal")
+          "run"
+          # Port to listen on
+          "--port"
+          cfg.port
+          # Log to stdout
+          "--log-file"
+          "console"
+          # Log level
+          "--log-level"
+          cfg.logLevel
+        ];
+
+        Restart = "always";
+        RuntimeDirectory = "netbird-mgmt";
+        StateDirectory = "netbird-mgmt";
+        WorkingDirectory = "/var/lib/netbird-mgmt";
+
+        # hardening
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = true;
+        RemoveIPC = true;
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+      };
+
+      stopIfChanged = false;
+    };
+
+    services.nginx = mkIf cfg.enableNginx {
+      enable = true;
+
+      virtualHosts.${cfg.domain} = {
+        locations."/signalexchange.SignalExchange/".extraConfig = ''
+          # This is necessary so that grpc connections do not get closed early
+          # see https://stackoverflow.com/a/67805465
+          client_body_timeout 1d;
+
+          grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+
+          grpc_pass grpc://localhost:${builtins.toString cfg.port};
+          grpc_read_timeout 1d;
+          grpc_send_timeout 1d;
+          grpc_socket_keepalive on;
+        '';
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/networking/sunshine.nix b/nixos/modules/services/networking/sunshine.nix
new file mode 100644
index 0000000000000..c115b9cd5cf99
--- /dev/null
+++ b/nixos/modules/services/networking/sunshine.nix
@@ -0,0 +1,156 @@
+{ config, lib, pkgs, utils, ... }:
+let
+  inherit (lib) mkEnableOption mkPackageOption mkOption mkIf mkDefault types optionals getExe;
+  inherit (utils) escapeSystemdExecArgs;
+  cfg = config.services.sunshine;
+
+  # ports used are offset from a single base port, see https://docs.lizardbyte.dev/projects/sunshine/en/latest/about/advanced_usage.html#port
+  generatePorts = port: offsets: map (offset: port + offset) offsets;
+  defaultPort = 47989;
+
+  appsFormat = pkgs.formats.json { };
+  settingsFormat = pkgs.formats.keyValue { };
+
+  appsFile = appsFormat.generate "apps.json" cfg.applications;
+  configFile = settingsFormat.generate "sunshine.conf" cfg.settings;
+in
+{
+  options.services.sunshine = with types; {
+    enable = mkEnableOption "Sunshine, a self-hosted game stream host for Moonlight";
+    package = mkPackageOption pkgs "sunshine" { };
+    openFirewall = mkOption {
+      type = bool;
+      default = false;
+      description = ''
+        Whether to automatically open ports in the firewall.
+      '';
+    };
+    capSysAdmin = mkOption {
+      type = bool;
+      default = false;
+      description = ''
+        Whether to give the Sunshine binary CAP_SYS_ADMIN, required for DRM/KMS screen capture.
+      '';
+    };
+    settings = mkOption {
+      default = { };
+      description = ''
+        Settings to be rendered into the configuration file. If this is set, no configuration is possible from the web UI.
+
+        See <https://docs.lizardbyte.dev/projects/sunshine/en/latest/about/advanced_usage.html#configuration> for syntax.
+      '';
+      example = ''
+        {
+          sunshine_name = "nixos";
+        }
+      '';
+      type = submodule (settings: {
+        freeformType = settingsFormat.type;
+        options.port = mkOption {
+          type = port;
+          default = defaultPort;
+          description = ''
+            Base port; other ports used by Sunshine are offset from this one, see <https://docs.lizardbyte.dev/projects/sunshine/en/latest/about/advanced_usage.html#port> for details.
+          '';
+        };
+      });
+    };
+    applications = mkOption {
+      default = { };
+      description = ''
+        Configuration for applications to be exposed to Moonlight. If this is set, applications can no longer be configured from the web UI; any remaining configuration must be done through the `settings` option.
+      '';
+      example = ''
+        {
+          env = {
+            PATH = "$(PATH):$(HOME)/.local/bin";
+          };
+          apps = [
+            {
+              name = "1440p Desktop";
+              prep-cmd = [
+                {
+                  do = "''${pkgs.kdePackages.libkscreen}/bin/kscreen-doctor output.DP-4.mode.2560x1440@144";
+                  undo = "''${pkgs.kdePackages.libkscreen}/bin/kscreen-doctor output.DP-4.mode.3440x1440@144";
+                }
+              ];
+              exclude-global-prep-cmd = "false";
+              auto-detach = "true";
+            }
+          ];
+        }
+      '';
+      type = submodule {
+        options = {
+          env = mkOption {
+            default = { };
+            description = ''
+              Environment variables to be set for the applications.
+            '';
+            type = attrsOf str;
+          };
+          apps = mkOption {
+            default = [ ];
+            description = ''
+              Applications to be exposed to Moonlight.
+            '';
+            type = listOf attrs;
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.sunshine.settings.file_apps = mkIf (cfg.applications.apps != [ ]) "${appsFile}";
+
+    environment.systemPackages = [
+      cfg.package
+    ];
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = generatePorts cfg.settings.port [ (-5) 0 1 21 ];
+      allowedUDPPorts = generatePorts cfg.settings.port [ 9 10 11 13 21 ];
+    };
+
+    boot.kernelModules = [ "uinput" ];
+
+    services.udev.packages = [ cfg.package ];
+
+    services.avahi = {
+      enable = mkDefault true;
+      publish = {
+        enable = mkDefault true;
+        userServices = mkDefault true;
+      };
+    };
+
+    security.wrappers.sunshine = mkIf cfg.capSysAdmin {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_sys_admin+p";
+      source = getExe cfg.package;
+    };
+
+    systemd.user.services.sunshine = {
+      description = "Self-hosted game stream host for Moonlight";
+
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+      wants = [ "graphical-session.target" ];
+      after = [ "graphical-session.target" ];
+
+      startLimitIntervalSec = 500;
+      startLimitBurst = 5;
+
+      serviceConfig = {
+        # Pass the generated config file only when applications or non-default settings are declared; otherwise omit it so the web UI can still be used for configuration.
+        ExecStart = escapeSystemdExecArgs ([
+          (if cfg.capSysAdmin then "${config.security.wrapperDir}/sunshine" else "${getExe cfg.package}")
+        ] ++ optionals (cfg.applications.apps != [ ] || (builtins.length (builtins.attrNames cfg.settings) > 1 || cfg.settings.port != defaultPort)) [ "${configFile}" ]);
+        Restart = "on-failure";
+        RestartSec = "5s";
+      };
+    };
+  };
+}
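A hedged usage sketch for the new Sunshine module (values are illustrative); note that declaring any non-default setting disables configuration from the web UI as described above:

  services.sunshine = {
    enable = true;
    openFirewall = true;                 # open the offset port range
    capSysAdmin = true;                  # wrap the binary for DRM/KMS capture
    settings.sunshine_name = "nixos";    # illustrative value
  };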
diff --git a/nixos/modules/services/networking/wpa_supplicant.nix b/nixos/modules/services/networking/wpa_supplicant.nix
index c9dd1d1b0f01f..435cd530c18d4 100644
--- a/nixos/modules/services/networking/wpa_supplicant.nix
+++ b/nixos/modules/services/networking/wpa_supplicant.nix
@@ -124,11 +124,20 @@ let
           fi
         ''}
 
+        # ensure wpa_supplicant.conf exists, or the daemon will fail to start
+        ${optionalString cfg.allowAuxiliaryImperativeNetworks ''
+          touch /etc/wpa_supplicant.conf
+        ''}
+
         # substitute environment variables
         if [ -f "${configFile}" ]; then
           ${pkgs.gawk}/bin/awk '{
-            for(varname in ENVIRON)
-              gsub("@"varname"@", ENVIRON[varname])
+            for(varname in ENVIRON) {
+              find = "@"varname"@"
+              repl = ENVIRON[varname]
+              if (i = index($0, find))
+                $0 = substr($0, 1, i-1) repl substr($0, i+length(find))
+            }
             print
           }' "${configFile}" > "${finalConfig}"
         else
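For context, a sketch of how the substituted placeholders are typically used with the documented `environmentFile` mechanism (paths and variable names below are illustrative):

  networking.wireless = {
    enable = true;
    # File containing lines like PSK_HOME=supersecret; the @PSK_HOME@
    # placeholder below is replaced by the awk snippet above at service start.
    environmentFile = "/run/secrets/wireless.env";
    networks."home".psk = "@PSK_HOME@";
  };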
diff --git a/nixos/modules/services/search/manticore.nix b/nixos/modules/services/search/manticore.nix
new file mode 100644
index 0000000000000..a8fcd9d0b3820
--- /dev/null
+++ b/nixos/modules/services/search/manticore.nix
@@ -0,0 +1,131 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.manticore;
+  format = pkgs.formats.json { };
+
+  toSphinx = {
+    mkKeyValue ? lib.generators.mkKeyValueDefault {} "=",
+    listsAsDuplicateKeys ? true
+  }: attrsOfAttrs:
+    let
+        # map function to string for each key val
+        mapAttrsToStringsSep = sep: mapFn: attrs:
+          concatStringsSep sep
+            (mapAttrsToList mapFn attrs);
+        mkSection = sectName: sectValues: ''
+          ${sectName} {
+        '' + lib.generators.toKeyValue { inherit mkKeyValue listsAsDuplicateKeys; } sectValues + ''}'';
+    in
+      # map input to ini sections
+      mapAttrsToStringsSep "\n" mkSection attrsOfAttrs;
+
+  configFile = pkgs.writeText "manticore.conf" (
+    toSphinx {
+        mkKeyValue = k: v: "  ${k} = ${v}";
+    } cfg.settings
+  );
+
+in {
+
+  options = {
+    services.manticore = {
+
+      enable = mkEnableOption "Manticoresearch";
+
+      settings = mkOption {
+        default = {
+          searchd = {
+            listen = [
+              "127.0.0.1:9312"
+              "127.0.0.1:9306:mysql"
+              "127.0.0.1:9308:http"
+            ];
+            log = "/var/log/manticore/searchd.log";
+            query_log = "/var/log/manticore/query.log";
+            pid_file = "/run/manticore/searchd.pid";
+            data_dir = "/var/lib/manticore";
+          };
+        };
+        description = ''
+          Configuration for Manticoresearch. See
+          <https://manual.manticoresearch.com/Server%20settings>
+          for more information.
+        '';
+        type = types.submodule {
+          freeformType = format.type;
+        };
+        example = literalExpression ''
+          {
+            searchd = {
+                listen = [
+                  "127.0.0.1:9312"
+                  "127.0.0.1:9306:mysql"
+                  "127.0.0.1:9308:http"
+                ];
+                log = "/var/log/manticore/searchd.log";
+                query_log = "/var/log/manticore/query.log";
+                pid_file = "/run/manticore/searchd.pid";
+                data_dir = "/var/lib/manticore";
+            };
+          }
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd = {
+      packages = [ pkgs.manticoresearch ];
+      services.manticore = {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        serviceConfig = {
+          ExecStart = [
+            ""
+            "${pkgs.manticoresearch}/bin/searchd --config ${configFile}"
+          ];
+          ExecStop = [
+            ""
+            "${pkgs.manticoresearch}/bin/searchd --config ${configFile} --stopwait"
+          ];
+          ExecStartPre = [ "" ];
+          DynamicUser = true;
+          LogsDirectory = "manticore";
+          RuntimeDirectory = "manticore";
+          StateDirectory = "manticore";
+          ReadWritePaths = "";
+          CapabilityBoundingSet = "";
+          RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+          RestrictNamespaces = true;
+          PrivateDevices = true;
+          PrivateUsers = true;
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [ "@system-service" "~@privileged" ];
+          RestrictRealtime = true;
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          UMask = "0066";
+          ProtectHostname = true;
+        } // lib.optionalAttrs (cfg.settings.searchd.pid_file != null) {
+          PIDFile = cfg.settings.searchd.pid_file;
+        };
+      };
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ onny ];
+
+}
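A minimal sketch of enabling the new module with its defaults, which listen only on localhost (MySQL protocol on 9306, HTTP on 9308) and keep data under /var/lib/manticore:

  services.manticore.enable = true;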
diff --git a/nixos/modules/services/security/oauth2_proxy.nix b/nixos/modules/services/security/oauth2_proxy.nix
index abf1ce9ba0200..d2992a196bf87 100644
--- a/nixos/modules/services/security/oauth2_proxy.nix
+++ b/nixos/modules/services/security/oauth2_proxy.nix
@@ -47,6 +47,7 @@ let
     reverse-proxy = reverseProxy;
     proxy-prefix = proxyPrefix;
     profile-url = profileURL;
+    oidc-issuer-url = oidcIssuerUrl;
     redeem-url = redeemURL;
     redirect-url = redirectURL;
     request-logging = requestLogging;
@@ -131,6 +132,15 @@ in
       example = "123456.apps.googleusercontent.com";
     };
 
+    oidcIssuerUrl = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = ''
+        The OAuth issuer URL.
+      '';
+      example = "https://login.microsoftonline.com/{TENANT_ID}/v2.0";
+    };
+
     clientSecret = mkOption {
       type = types.nullOr types.str;
       description = ''
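A hedged sketch of using the new option (tenant ID and client ID are placeholders; the other options already exist in this module):

  services.oauth2_proxy = {
    enable = true;
    provider = "oidc";
    clientID = "00000000-0000-0000-0000-000000000000";
    oidcIssuerUrl = "https://login.microsoftonline.com/{TENANT_ID}/v2.0";
  };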
diff --git a/nixos/modules/services/system/earlyoom.nix b/nixos/modules/services/system/earlyoom.nix
index bcdf7d6512d5a..7e012dee02cbf 100644
--- a/nixos/modules/services/system/earlyoom.nix
+++ b/nixos/modules/services/system/earlyoom.nix
@@ -4,15 +4,29 @@ let
   cfg = config.services.earlyoom;
 
   inherit (lib)
-    mkDefault mkEnableOption mkIf mkOption types
-    mkRemovedOptionModule literalExpression
-    escapeShellArg concatStringsSep optional optionalString;
-
+    concatStringsSep
+    escapeShellArg
+    literalExpression
+    mkDefault
+    mkEnableOption
+    mkIf
+    mkOption
+    mkPackageOption
+    mkRemovedOptionModule
+    optionalString
+    optionals
+    types;
 in
 {
+  meta = {
+    maintainers = with lib.maintainers; [ AndersonTorres ];
+  };
+
   options.services.earlyoom = {
     enable = mkEnableOption "early out of memory killing";
 
+    package = mkPackageOption pkgs "earlyoom" { };
+
     freeMemThreshold = mkOption {
       type = types.ints.between 1 100;
       default = 10;
@@ -138,22 +152,21 @@ in
     systemd.services.earlyoom = {
       description = "Early OOM Daemon for Linux";
       wantedBy = [ "multi-user.target" ];
-      path = optional cfg.enableNotifications pkgs.dbus;
+      path = optionals cfg.enableNotifications [ pkgs.dbus ];
       serviceConfig = {
         StandardError = "journal";
         ExecStart = concatStringsSep " " ([
-          "${pkgs.earlyoom}/bin/earlyoom"
+          "${lib.getExe cfg.package}"
           ("-m ${toString cfg.freeMemThreshold}"
-            + optionalString (cfg.freeMemKillThreshold != null) ",${toString cfg.freeMemKillThreshold}")
+           + optionalString (cfg.freeMemKillThreshold != null) ",${toString cfg.freeMemKillThreshold}")
           ("-s ${toString cfg.freeSwapThreshold}"
-            + optionalString (cfg.freeSwapKillThreshold != null) ",${toString cfg.freeSwapKillThreshold}")
+           + optionalString (cfg.freeSwapKillThreshold != null) ",${toString cfg.freeSwapKillThreshold}")
           "-r ${toString cfg.reportInterval}"
         ]
-        ++ optional cfg.enableDebugInfo "-d"
-        ++ optional cfg.enableNotifications "-n"
-        ++ optional (cfg.killHook != null) "-N ${escapeShellArg cfg.killHook}"
-        ++ cfg.extraArgs
-        );
+        ++ optionals cfg.enableDebugInfo [ "-d" ]
+        ++ optionals cfg.enableNotifications [ "-n" ]
+        ++ optionals (cfg.killHook != null) [ "-N ${escapeShellArg cfg.killHook}" ]
+        ++ cfg.extraArgs);
       };
     };
   };
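A brief sketch of the new `package` option alongside existing options (the threshold value is illustrative):

  services.earlyoom = {
    enable = true;
    package = pkgs.earlyoom;      # the default, shown here for illustration
    freeMemThreshold = 5;
  };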
diff --git a/nixos/modules/services/web-apps/coder.nix b/nixos/modules/services/web-apps/coder.nix
index 318a7c8fc1357..d4a5b7b2b89cd 100644
--- a/nixos/modules/services/web-apps/coder.nix
+++ b/nixos/modules/services/web-apps/coder.nix
@@ -169,7 +169,7 @@ in {
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
 
-      environment = config.environment.extra // {
+      environment = cfg.environment.extra // {
         CODER_ACCESS_URL = cfg.accessUrl;
         CODER_WILDCARD_ACCESS_URL = cfg.wildcardAccessUrl;
         CODER_PG_CONNECTION_URL = "user=${cfg.database.username} ${optionalString (cfg.database.password != null) "password=${cfg.database.password}"} database=${cfg.database.database} host=${cfg.database.host} ${optionalString (cfg.database.sslmode != null) "sslmode=${cfg.database.sslmode}"}";
diff --git a/nixos/modules/services/web-apps/firefly-iii.nix b/nixos/modules/services/web-apps/firefly-iii.nix
new file mode 100644
index 0000000000000..b0024ce09c38e
--- /dev/null
+++ b/nixos/modules/services/web-apps/firefly-iii.nix
@@ -0,0 +1,367 @@
+{ pkgs, config, lib, ... }:
+
+let
+  inherit (lib) optionalString mkDefault mkIf mkOption mkEnableOption literalExpression;
+  inherit (lib.types) nullOr attrsOf oneOf str int bool path package enum submodule;
+  inherit (lib.strings) concatMapStringsSep removePrefix toShellVars removeSuffix hasSuffix;
+  inherit (lib.attrsets) attrValues genAttrs filterAttrs mapAttrs' nameValuePair;
+  inherit (builtins) isInt isString toString typeOf;
+
+  cfg = config.services.firefly-iii;
+
+  user = cfg.user;
+  group = cfg.group;
+
+  defaultUser = "firefly-iii";
+  defaultGroup = "firefly-iii";
+
+  artisan = "${cfg.package}/artisan";
+
+  env-file-values = mapAttrs' (n: v: nameValuePair (removeSuffix "_FILE" n) v)
+    (filterAttrs (n: v: hasSuffix "_FILE" n) cfg.settings);
+  env-nonfile-values = filterAttrs (n: v: ! hasSuffix "_FILE" n) cfg.settings;
+
+  envfile = pkgs.writeText "firefly-iii-env" ''
+    ${toShellVars env-file-values}
+    ${toShellVars env-nonfile-values}
+  '';
+
+  fileenv-func = ''
+    cp --no-preserve=mode ${envfile} /tmp/firefly-iii-env
+    ${concatMapStringsSep "\n"
+      (n: "${pkgs.replace-secret}/bin/replace-secret ${n} ${n} /tmp/firefly-iii-env")
+      (attrValues env-file-values)}
+    set -a
+    . /tmp/firefly-iii-env
+    set +a
+  '';
+
+  firefly-iii-maintenance = pkgs.writeShellScript "firefly-iii-maintenance.sh" ''
+    ${fileenv-func}
+
+    ${optionalString (cfg.settings.DB_CONNECTION == "sqlite")
+      "touch ${cfg.dataDir}/storage/database/database.sqlite"}
+    ${artisan} migrate --seed --no-interaction --force
+    ${artisan} firefly-iii:decrypt-all
+    ${artisan} firefly-iii:upgrade-database
+    ${artisan} firefly-iii:correct-database
+    ${artisan} firefly-iii:report-integrity
+    ${artisan} firefly-iii:laravel-passport-keys
+    ${artisan} cache:clear
+
+    mv /tmp/firefly-iii-env /run/phpfpm/firefly-iii-env
+  '';
+
+  commonServiceConfig = {
+    Type = "oneshot";
+    User = user;
+    Group = group;
+    StateDirectory = "${removePrefix "/var/lib/" cfg.dataDir}";
+    WorkingDirectory = cfg.package;
+    PrivateTmp = true;
+    PrivateDevices = true;
+    CapabilityBoundingSet = "";
+    AmbientCapabilities = "";
+    ProtectSystem = "strict";
+    ProtectKernelTunables = true;
+    ProtectKernelModules = true;
+    ProtectControlGroups = true;
+    ProtectClock = true;
+    ProtectHostname = true;
+    ProtectHome = "tmpfs";
+    ProtectKernelLogs = true;
+    ProtectProc = "invisible";
+    ProcSubset = "pid";
+    PrivateNetwork = false;
+    RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX";
+    SystemCallArchitectures = "native";
+    SystemCallFilter = [
+      "@system-service @resources"
+      "~@obsolete @privileged"
+    ];
+    RestrictSUIDSGID = true;
+    RemoveIPC = true;
+    NoNewPrivileges = true;
+    RestrictRealtime = true;
+    RestrictNamespaces = true;
+    LockPersonality = true;
+    PrivateUsers = true;
+  };
+
+in {
+
+  options.services.firefly-iii = {
+
+    enable = mkEnableOption "Firefly III: A free and open source personal finance manager";
+
+    user = mkOption {
+      type = str;
+      default = defaultUser;
+      description = "User account under which firefly-iii runs.";
+    };
+
+    group = mkOption {
+      type = str;
+      default = if cfg.enableNginx then "nginx" else defaultGroup;
+      defaultText = "If `services.firefly-iii.enableNginx` is true then `nginx` else ${defaultGroup}";
+      description = ''
+        Group under which firefly-iii runs. It is best to set this to the group
+        of whatever webserver is being used as the frontend.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = path;
+      default = "/var/lib/firefly-iii";
+      description = ''
+        The place where firefly-iii stores its state.
+      '';
+    };
+
+    package = mkOption {
+      type = package;
+      default = pkgs.firefly-iii;
+      defaultText = literalExpression "pkgs.firefly-iii";
+      description = ''
+        The firefly-iii package served by php-fpm and the webserver of choice.
+        This option can be used to point the webserver to the correct root. It
+        may also be used to set the package to a different version, say a
+        development version.
+      '';
+      apply = firefly-iii : firefly-iii.override (prev: {
+        dataDir = cfg.dataDir;
+      });
+    };
+
+    enableNginx = mkOption {
+      type = bool;
+      default = false;
+      description = ''
+        Whether to enable nginx or not. If enabled, an nginx virtual host will
+        be created for access to firefly-iii. If not enabled, then you may use
+        `''${config.services.firefly-iii.package}` as your document root in
+        whichever webserver you wish to setup.
+      '';
+    };
+
+    virtualHost = mkOption {
+      type = str;
+      description = ''
+        The hostname at which you wish firefly-iii to be served. If you have
+        enabled nginx using `services.firefly-iii.enableNginx` then this will
+        be used.
+      '';
+    };
+
+    poolConfig = mkOption {
+      type = attrsOf (oneOf [ str int bool ]);
+      default = {
+        "pm" = "dynamic";
+        "pm.max_children" = 32;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 2;
+        "pm.max_spare_servers" = 4;
+        "pm.max_requests" = 500;
+      };
+      description = ''
+        Options for the Firefly III PHP pool. See the documentation on
+        `php-fpm.conf` for details on configuration directives.
+      '';
+    };
+
+    settings = mkOption {
+      description = ''
+        Options for firefly-iii configuration. Refer to
+        <https://github.com/firefly-iii/firefly-iii/blob/main/.env.example> for
+        details on supported values. All `_FILE` variants of the settings
+        supported by upstream are supported here.
+
+        APP_URL will be set by `services.firefly-iii.virtualHost`, do not
+        redefine it here.
+      '';
+      example = literalExpression ''
+        {
+          APP_ENV = "production";
+          APP_KEY_FILE = "/var/secrets/firefly-iii-app-key.txt";
+          SITE_OWNER = "mail@example.com";
+          DB_CONNECTION = "mysql";
+          DB_HOST = "db";
+          DB_PORT = 3306;
+          DB_DATABASE = "firefly";
+          DB_USERNAME = "firefly";
+          DB_PASSWORD_FILE = "/var/secrets/firefly-iii-mysql-password.txt";
+        }
+      '';
+      default = {};
+      type = submodule {
+        freeformType = attrsOf (oneOf [str int bool]);
+        options = {
+          DB_CONNECTION = mkOption {
+            type = enum [ "sqlite" "pgsql" "mysql" ];
+            default = "sqlite";
+            example = "pgsql";
+            description = ''
+              The type of database you wish to use. Can be one of "sqlite",
+              "mysql" or "pgsql".
+            '';
+          };
+          APP_ENV = mkOption {
+            type = enum [ "local" "production" "testing" ];
+            default = "local";
+            example = "production";
+            description = ''
+              The app environment. It is recommended to keep this at "local".
+              Possible values are "local", "production" and "testing".
+            '';
+          };
+          DB_PORT = mkOption {
+            type = nullOr int;
+            default = if cfg.settings.DB_CONNECTION == "sqlite" then null
+                      else if cfg.settings.DB_CONNECTION == "mysql" then 3306
+                      else 5432;
+            defaultText = ''
+              `null` if DB_CONNECTION is "sqlite", `3306` if "mysql", `5432` if "pgsql"
+            '';
+            description = ''
+              The port your database is listening at. sqlite does not require
+              this value to be filled.
+            '';
+          };
+          APP_KEY_FILE = mkOption {
+            type = path;
+            description = ''
+              The path to your appkey. The file should contain a 32 character
+              random app key. This may be set using `echo "base64:$(head -c 32
+              /dev/urandom | base64)" > /path/to/key-file`.
+            '';
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    services.firefly-iii = {
+      settings = {
+        APP_URL = cfg.virtualHost;
+      };
+    };
+
+    services.phpfpm.pools.firefly-iii = {
+      inherit user group;
+      phpPackage = cfg.package.phpPackage;
+      phpOptions = ''
+        log_errors = on
+      '';
+      settings = {
+        "listen.mode" = "0660";
+        "listen.owner" = user;
+        "listen.group" = group;
+        "clear_env" = "no";
+      } // cfg.poolConfig;
+    };
+
+    systemd.services.phpfpm-firefly-iii.serviceConfig = {
+      EnvironmentFile = "/run/phpfpm/firefly-iii-env";
+      ExecStartPost = "${pkgs.coreutils}/bin/rm /run/phpfpm/firefly-iii-env";
+    };
+
+    systemd.services.firefly-iii-setup = {
+      requiredBy = [ "phpfpm-firefly-iii.service" ];
+      before = [ "phpfpm-firefly-iii.service" ];
+      serviceConfig = {
+        ExecStart = firefly-iii-maintenance;
+        RuntimeDirectory = "phpfpm";
+        RuntimeDirectoryPreserve = true;
+      } // commonServiceConfig;
+      unitConfig.JoinsNamespaceOf = "phpfpm-firefly-iii.service";
+    };
+
+    systemd.services.firefly-iii-cron = {
+      description = "Daily Firefly III cron job";
+      script = ''
+        ${fileenv-func}
+        ${artisan} firefly-iii:cron
+      '';
+      serviceConfig = commonServiceConfig;
+    };
+
+    systemd.timers.firefly-iii-cron = {
+      description = "Trigger Firefly Cron";
+      timerConfig = {
+        OnCalendar = "daily";
+        RandomizedDelaySec = "1800s";
+        Persistent = true;
+      };
+      wantedBy = [ "timers.target" ];
+    };
+
+    services.nginx = mkIf cfg.enableNginx {
+      enable = true;
+      recommendedTlsSettings = mkDefault true;
+      recommendedOptimisation = mkDefault true;
+      recommendedGzipSettings = mkDefault true;
+      virtualHosts.${cfg.virtualHost} = {
+        root = "${cfg.package}/public";
+        locations = {
+          "/" = {
+            tryFiles = "$uri $uri/ /index.php?$query_string";
+            index = "index.php";
+            extraConfig = ''
+              sendfile off;
+            '';
+          };
+          "~ \\.php$" = {
+            extraConfig = ''
+              include ${config.services.nginx.package}/conf/fastcgi_params;
+              fastcgi_param SCRIPT_FILENAME $request_filename;
+              fastcgi_param modHeadersAvailable true; #Avoid sending the security headers twice
+              fastcgi_pass unix:${config.services.phpfpm.pools.firefly-iii.socket};
+            '';
+          };
+        };
+      };
+    };
+
+    systemd.tmpfiles.settings."10-firefly-iii" = genAttrs [
+      "${cfg.dataDir}/storage"
+      "${cfg.dataDir}/storage/app"
+      "${cfg.dataDir}/storage/database"
+      "${cfg.dataDir}/storage/export"
+      "${cfg.dataDir}/storage/framework"
+      "${cfg.dataDir}/storage/framework/cache"
+      "${cfg.dataDir}/storage/framework/sessions"
+      "${cfg.dataDir}/storage/framework/views"
+      "${cfg.dataDir}/storage/logs"
+      "${cfg.dataDir}/storage/upload"
+      "${cfg.dataDir}/cache"
+    ] (n: {
+      d = {
+        group = group;
+        mode = "0700";
+        user = user;
+      };
+    }) // {
+      "${cfg.dataDir}".d = {
+        group = group;
+        mode = "0710";
+        user = user;
+      };
+    };
+
+    users = {
+      users = mkIf (user == defaultUser) {
+        ${defaultUser} = {
+          description = "Firefly-iii service user";
+          inherit group;
+          isSystemUser = true;
+          home = cfg.dataDir;
+        };
+      };
+      groups = mkIf (group == defaultGroup) {
+        ${defaultGroup} = {};
+      };
+    };
+  };
+}
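A hedged usage sketch for the new firefly-iii module (hostname and secret path are placeholders; sqlite is the default database backend):

  services.firefly-iii = {
    enable = true;
    enableNginx = true;
    virtualHost = "firefly.example.com";
    settings = {
      APP_ENV = "production";
      APP_KEY_FILE = "/var/secrets/firefly-iii-app-key.txt";
      SITE_OWNER = "admin@example.com";
      DB_CONNECTION = "sqlite";
    };
  };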
diff --git a/nixos/modules/services/web-apps/limesurvey.nix b/nixos/modules/services/web-apps/limesurvey.nix
index 0d0361584c3a0..cdd60f572b990 100644
--- a/nixos/modules/services/web-apps/limesurvey.nix
+++ b/nixos/modules/services/web-apps/limesurvey.nix
@@ -2,7 +2,7 @@
 
 let
 
-  inherit (lib) mkDefault mkEnableOption mkForce mkIf mkMerge mkOption;
+  inherit (lib) mkDefault mkEnableOption mkForce mkIf mkMerge mkOption mkPackageOption;
   inherit (lib) literalExpression mapAttrs optional optionalString types;
 
   cfg = config.services.limesurvey;
@@ -12,8 +12,6 @@ let
   group = config.services.httpd.group;
   stateDir = "/var/lib/limesurvey";
 
-  pkg = pkgs.limesurvey;
-
   configType = with types; oneOf [ (attrsOf configType) str int bool ] // {
     description = "limesurvey config type (str, int, bool or attribute set thereof)";
   };
@@ -34,6 +32,8 @@ in
   options.services.limesurvey = {
     enable = mkEnableOption "Limesurvey web application";
 
+    package = mkPackageOption pkgs "limesurvey" { };
+
     encryptionKey = mkOption {
       type = types.str;
       default = "E17687FC77CEE247F0E22BB3ECF27FDE8BEC310A892347EC13013ABA11AA7EB5";
@@ -240,7 +240,7 @@ in
       adminAddr = mkDefault cfg.virtualHost.adminAddr;
       extraModules = [ "proxy_fcgi" ];
       virtualHosts.${cfg.virtualHost.hostName} = mkMerge [ cfg.virtualHost {
-        documentRoot = mkForce "${pkg}/share/limesurvey";
+        documentRoot = mkForce "${cfg.package}/share/limesurvey";
         extraConfig = ''
           Alias "/tmp" "${stateDir}/tmp"
           <Directory "${stateDir}">
@@ -256,7 +256,7 @@ in
             Options -Indexes
           </Directory>
 
-          <Directory "${pkg}/share/limesurvey">
+          <Directory "${cfg.package}/share/limesurvey">
             <FilesMatch "\.php$">
               <If "-f %{REQUEST_FILENAME}">
                 SetHandler "proxy:unix:${fpm.socket}|fcgi://localhost/"
@@ -277,7 +277,7 @@ in
       "d ${stateDir}/tmp/assets 0750 ${user} ${group} - -"
       "d ${stateDir}/tmp/runtime 0750 ${user} ${group} - -"
       "d ${stateDir}/tmp/upload 0750 ${user} ${group} - -"
-      "C ${stateDir}/upload 0750 ${user} ${group} - ${pkg}/share/limesurvey/upload"
+      "C ${stateDir}/upload 0750 ${user} ${group} - ${cfg.package}/share/limesurvey/upload"
     ];
 
     systemd.services.limesurvey-init = {
@@ -288,8 +288,8 @@ in
       environment.LIMESURVEY_CONFIG = limesurveyConfig;
       script = ''
         # update or install the database as required
-        ${pkgs.php81}/bin/php ${pkg}/share/limesurvey/application/commands/console.php updatedb || \
-        ${pkgs.php81}/bin/php ${pkg}/share/limesurvey/application/commands/console.php install admin password admin admin@example.com verbose
+        ${pkgs.php81}/bin/php ${cfg.package}/share/limesurvey/application/commands/console.php updatedb || \
+        ${pkgs.php81}/bin/php ${cfg.package}/share/limesurvey/application/commands/console.php install admin password admin admin@example.com verbose
       '';
       serviceConfig = {
         User = user;
diff --git a/nixos/modules/services/web-apps/mediawiki.nix b/nixos/modules/services/web-apps/mediawiki.nix
index 7246fd93a2314..b11626ec2dc3b 100644
--- a/nixos/modules/services/web-apps/mediawiki.nix
+++ b/nixos/modules/services/web-apps/mediawiki.nix
@@ -246,7 +246,9 @@ in
 
       passwordFile = mkOption {
         type = types.path;
-        description = "A file containing the initial password for the admin user.";
+        description = ''
+          A file containing the initial password for the administrator account "admin".
+        '';
         example = "/run/keys/mediawiki-password";
       };
 
diff --git a/nixos/modules/services/web-apps/pretalx.nix b/nixos/modules/services/web-apps/pretalx.nix
index e80eedf9f8590..b062a8b7eeeac 100644
--- a/nixos/modules/services/web-apps/pretalx.nix
+++ b/nixos/modules/services/web-apps/pretalx.nix
@@ -286,16 +286,16 @@ in
         virtualHosts.${cfg.nginx.domain} = {
           # https://docs.pretalx.org/administrator/installation.html#step-7-ssl
           extraConfig = ''
-            more_set_headers Referrer-Policy same-origin;
-            more_set_headers X-Content-Type-Options nosniff;
+            more_set_headers "Referrer-Policy: same-origin";
+            more_set_headers "X-Content-Type-Options: nosniff";
           '';
           locations = {
             "/".proxyPass = "http://pretalx";
             "/media/" = {
-              alias = "${cfg.settings.filesystem.data}/data/media/";
+              alias = "${cfg.settings.filesystem.data}/media/";
               extraConfig = ''
                 access_log off;
-                more_set_headers Content-Disposition 'attachment; filename="$1"';
+                more_set_headers 'Content-Disposition: attachment; filename="$1"';
                 expires 7d;
               '';
             };
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index 40470f535bf61..337d53e869efe 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -829,7 +829,7 @@ in
       sslCiphers = mkOption {
         type = types.nullOr types.str;
         # Keep in sync with https://ssl-config.mozilla.org/#server=nginx&config=intermediate
-        default = "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384";
+        default = "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305";
         description = "Ciphers to choose from when negotiating TLS handshakes.";
       };
 
diff --git a/nixos/modules/services/x11/desktop-managers/lxqt.nix b/nixos/modules/services/x11/desktop-managers/lxqt.nix
index 3fce3283e71ff..1937bdcbd3e73 100644
--- a/nixos/modules/services/x11/desktop-managers/lxqt.nix
+++ b/nixos/modules/services/x11/desktop-managers/lxqt.nix
@@ -71,7 +71,7 @@ in
 
     services.xserver.libinput.enable = mkDefault true;
 
-    xdg.portal.lxqt.enable = true;
+    xdg.portal.lxqt.enable = mkDefault true;
 
     # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1050804
     xdg.portal.config.lxqt.default = mkDefault [ "lxqt" "gtk" ];