Diffstat (limited to 'nixos/modules')
-rw-r--r--  nixos/modules/config/nix.nix | 8
-rw-r--r--  nixos/modules/config/no-x-libs.nix | 1
-rw-r--r--  nixos/modules/misc/ids.nix | 4
-rw-r--r--  nixos/modules/module-list.nix | 3
-rw-r--r--  nixos/modules/programs/firejail.nix | 2
-rw-r--r--  nixos/modules/security/acme/default.md | 16
-rw-r--r--  nixos/modules/services/audio/wyoming/faster-whisper.nix | 1
-rw-r--r--  nixos/modules/services/audio/wyoming/piper.nix | 1
-rw-r--r--  nixos/modules/services/backup/borgbackup.nix | 33
-rw-r--r--  nixos/modules/services/backup/btrbk.nix | 54
-rw-r--r--  nixos/modules/services/cluster/kubernetes/flannel.nix | 9
-rw-r--r--  nixos/modules/services/cluster/spark/default.nix | 16
-rw-r--r--  nixos/modules/services/continuous-integration/buildbot/worker.nix | 4
-rw-r--r--  nixos/modules/services/databases/postgresql.md | 2
-rw-r--r--  nixos/modules/services/databases/postgresql.nix | 11
-rw-r--r--  nixos/modules/services/development/zammad.nix | 55
-rw-r--r--  nixos/modules/services/display-managers/greetd.nix | 12
-rw-r--r--  nixos/modules/services/games/teeworlds.nix | 2
-rw-r--r--  nixos/modules/services/hardware/power-profiles-daemon.nix | 19
-rw-r--r--  nixos/modules/services/home-automation/home-assistant.nix | 11
-rw-r--r--  nixos/modules/services/logging/vector.nix | 6
-rw-r--r--  nixos/modules/services/matrix/maubot.nix | 2
-rw-r--r--  nixos/modules/services/misc/guix/default.nix | 2
-rw-r--r--  nixos/modules/services/misc/redmine.nix | 5
-rw-r--r--  nixos/modules/services/misc/tandoor-recipes.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/grafana.nix | 4
-rw-r--r--  nixos/modules/services/monitoring/netdata.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/prometheus/default.nix | 19
-rw-r--r--  nixos/modules/services/monitoring/ups.nix | 421
-rw-r--r--  nixos/modules/services/networking/avahi-daemon.nix | 37
-rw-r--r--  nixos/modules/services/networking/ddclient.nix | 2
-rw-r--r--  nixos/modules/services/networking/ejabberd.nix | 6
-rw-r--r--  nixos/modules/services/networking/harmonia.nix | 6
-rw-r--r--  nixos/modules/services/networking/iwd.nix | 2
-rw-r--r--  nixos/modules/services/networking/jigasi.nix | 237
-rw-r--r--  nixos/modules/services/networking/teamspeak3.nix | 42
-rw-r--r--  nixos/modules/services/search/hound.nix | 77
-rw-r--r--  nixos/modules/services/security/clamav.nix | 52
-rw-r--r--  nixos/modules/services/torrent/transmission.nix | 8
-rw-r--r--  nixos/modules/services/web-apps/jitsi-meet.nix | 38
-rw-r--r--  nixos/modules/services/web-apps/keycloak.nix | 14
-rw-r--r--  nixos/modules/services/web-apps/mastodon.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/mattermost.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/mobilizon.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/nextcloud.nix | 6
-rw-r--r--  nixos/modules/services/web-apps/node-red.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/windmill.nix | 177
-rw-r--r--  nixos/modules/services/web-apps/wordpress.nix | 2
-rw-r--r--  nixos/modules/services/web-servers/caddy/default.nix | 6
-rw-r--r--  nixos/modules/services/web-servers/nginx/default.nix | 2
-rw-r--r--  nixos/modules/services/web-servers/nginx/location-options.nix | 2
-rw-r--r--  nixos/modules/services/web-servers/nginx/tailscale-auth.nix | 158
-rw-r--r--  nixos/modules/services/x11/desktop-managers/plasma5.nix | 1
-rw-r--r--  nixos/modules/system/boot/networkd.nix | 2
-rw-r--r--  nixos/modules/tasks/filesystems/zfs.nix | 6
-rw-r--r--  nixos/modules/tasks/network-interfaces.nix | 2
-rw-r--r--  nixos/modules/virtualisation/incus.nix | 4
-rw-r--r--  nixos/modules/virtualisation/lxc-container.nix | 4
-rw-r--r--  nixos/modules/virtualisation/lxc.nix | 26
-rw-r--r--  nixos/modules/virtualisation/lxcfs.nix | 12
-rw-r--r--  nixos/modules/virtualisation/lxd-agent.nix | 4
-rw-r--r--  nixos/modules/virtualisation/lxd-virtual-machine.nix | 4
-rw-r--r--  nixos/modules/virtualisation/lxd.nix | 6
63 files changed, 1433 insertions, 246 deletions
diff --git a/nixos/modules/config/nix.nix b/nixos/modules/config/nix.nix
index cee4f54db0cb5..2769d8b25ef6f 100644
--- a/nixos/modules/config/nix.nix
+++ b/nixos/modules/config/nix.nix
@@ -109,13 +109,17 @@ let
         if pkgs.stdenv.hostPlatform != pkgs.stdenv.buildPlatform then ''
           echo "Ignoring validation for cross-compilation"
         ''
-        else ''
+        else
+        let
+          showCommand = if isNixAtLeast "2.20pre" then "config show" else "show-config";
+        in
+        ''
           echo "Validating generated nix.conf"
           ln -s $out ./nix.conf
           set -e
           set +o pipefail
           NIX_CONF_DIR=$PWD \
-            ${cfg.package}/bin/nix show-config ${optionalString (isNixAtLeast "2.3pre") "--no-net"} \
+            ${cfg.package}/bin/nix ${showCommand} ${optionalString (isNixAtLeast "2.3pre") "--no-net"} \
               ${optionalString (isNixAtLeast "2.4pre") "--option experimental-features nix-command"} \
             |& sed -e 's/^warning:/error:/' \
             | (! grep '${if cfg.checkAllErrors then "^error:" else "^error: unknown setting"}')
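
The validation above now selects `config show` on Nix ≥ 2.20pre and keeps `show-config` for older releases. A minimal sketch of turning that validation on from a configuration (assuming the module's existing `checkConfig` toggle alongside the `checkAllErrors` flag seen above):

```nix
{
  nix = {
    # Validate the generated nix.conf at build time; with Nix >= 2.20pre the
    # module now invokes `nix config show` instead of `nix show-config`.
    checkConfig = true;
    # Treat every reported error as fatal, not only unknown settings.
    checkAllErrors = true;
  };
}
```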
diff --git a/nixos/modules/config/no-x-libs.nix b/nixos/modules/config/no-x-libs.nix
index b2eb46f273b14..ec26d4b12eff0 100644
--- a/nixos/modules/config/no-x-libs.nix
+++ b/nixos/modules/config/no-x-libs.nix
@@ -51,6 +51,7 @@ with lib;
       mc = super.mc.override { x11Support = false; };
       mpv-unwrapped = super.mpv-unwrapped.override { sdl2Support = false; x11Support = false; waylandSupport = false; };
       msmtp = super.msmtp.override { withKeyring = false; };
+      mupdf = super.mupdf.override { enableGL = false; enableX11 = false; };
       neofetch = super.neofetch.override { x11Support = false; };
       networkmanager-fortisslvpn = super.networkmanager-fortisslvpn.override { withGnome = false; };
       networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };
diff --git a/nixos/modules/misc/ids.nix b/nixos/modules/misc/ids.nix
index 18928a6bf21bb..5af7284ac71af 100644
--- a/nixos/modules/misc/ids.nix
+++ b/nixos/modules/misc/ids.nix
@@ -288,7 +288,7 @@ in
       telegraf = 256;
       gitlab-runner = 257;
       postgrey = 258;
-      hound = 259;
+      # hound = 259; # unused, removed 2023-11-21
       leaps = 260;
       ipfs  = 261;
       # stanchion = 262; # unused, removed 2020-10-14
@@ -599,7 +599,7 @@ in
       #telegraf = 256; # unused
       gitlab-runner = 257;
       postgrey = 258;
-      hound = 259;
+      # hound = 259; # unused, removed 2023-11-21
       leaps = 260;
       ipfs = 261;
       # stanchion = 262; # unused, removed 2020-10-14
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index fee7c35ed8f45..820ce54ddeb6c 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -971,6 +971,7 @@
   ./services/networking/iwd.nix
   ./services/networking/jibri/default.nix
   ./services/networking/jicofo.nix
+  ./services/networking/jigasi.nix
   ./services/networking/jitsi-videobridge.nix
   ./services/networking/jool.nix
   ./services/networking/kea.nix
@@ -1334,6 +1335,7 @@
   ./services/web-apps/vikunja.nix
   ./services/web-apps/whitebophir.nix
   ./services/web-apps/wiki-js.nix
+  ./services/web-apps/windmill.nix
   ./services/web-apps/wordpress.nix
   ./services/web-apps/writefreely.nix
   ./services/web-apps/youtrack.nix
@@ -1359,6 +1361,7 @@
   ./services/web-servers/molly-brown.nix
   ./services/web-servers/nginx/default.nix
   ./services/web-servers/nginx/gitweb.nix
+  ./services/web-servers/nginx/tailscale-auth.nix
   ./services/web-servers/phpfpm/default.nix
   ./services/web-servers/pomerium.nix
   ./services/web-servers/rustus.nix
diff --git a/nixos/modules/programs/firejail.nix b/nixos/modules/programs/firejail.nix
index 6f79c13d94b44..046c31ce64f6b 100644
--- a/nixos/modules/programs/firejail.nix
+++ b/nixos/modules/programs/firejail.nix
@@ -53,7 +53,7 @@ in {
           desktop = mkOption {
             type = types.nullOr types.path;
             default = null;
-            description = lib.mkDoc ".desktop file to modify. Only necessary if it uses the absolute path to the executable.";
+            description = lib.mdDoc ".desktop file to modify. Only necessary if it uses the absolute path to the executable.";
             example = literalExpression ''"''${pkgs.firefox}/share/applications/firefox.desktop"'';
           };
           profile = mkOption {
diff --git a/nixos/modules/security/acme/default.md b/nixos/modules/security/acme/default.md
index 31548ad181a73..51ee0428d84e5 100644
--- a/nixos/modules/security/acme/default.md
+++ b/nixos/modules/security/acme/default.md
@@ -45,7 +45,7 @@ placeholder certificates in place of the real ACME certs. The placeholder
 certs are overwritten when the ACME certs arrive. For
 `foo.example.com` the config would look like this:
 
-```
+```nix
 security.acme.acceptTerms = true;
 security.acme.defaults.email = "admin+acme@example.com";
 services.nginx = {
@@ -88,7 +88,7 @@ This example uses a vhost called `certs.example.com`, with
 the intent that you will generate certs for all your vhosts and redirect
 everyone to HTTPS.
 
-```
+```nix
 security.acme.acceptTerms = true;
 security.acme.defaults.email = "admin+acme@example.com";
 
@@ -136,7 +136,7 @@ services.httpd = {
 
 Now you need to configure ACME to generate a certificate.
 
-```
+```nix
 security.acme.certs."foo.example.com" = {
   webroot = "/var/lib/acme/.challenges";
   email = "foo@example.com";
@@ -167,7 +167,7 @@ see the [lego docs](https://go-acme.github.io/lego/dns/)
 for provider/server specific configuration values. For the sake of these
 docs, we will provide a fully self-hosted example using bind.
 
-```
+```nix
 services.bind = {
   enable = true;
   extraConfig = ''
@@ -199,7 +199,7 @@ The {file}`dnskeys.conf` and {file}`certs.secret`
 must be kept secure and thus you should not keep their contents in your
 Nix config. Instead, generate them one time with a systemd service:
 
-```
+```nix
 systemd.services.dns-rfc2136-conf = {
   requiredBy = ["acme-example.com.service" "bind.service"];
   before = ["acme-example.com.service" "bind.service"];
@@ -250,7 +250,7 @@ first, however instead of setting the options for one certificate
 you will set them as defaults
 (e.g. [](#opt-security.acme.defaults.dnsProvider)).
 
-```
+```nix
 # Configure ACME appropriately
 security.acme.acceptTerms = true;
 security.acme.defaults.email = "admin+acme@example.com";
@@ -287,7 +287,7 @@ There is no way to change the user the ACME module uses (it will always be
 Below is an example configuration for OpenSMTPD, but this pattern
 can be applied to any service.
 
-```
+```nix
 # Configure ACME however you like (DNS or HTTP validation), adding
 # the following configuration for the relevant certificate.
 # Note: You cannot use `systemctl reload` here as that would mean
@@ -340,7 +340,7 @@ to be regenerated. In this scenario lego will produce the error `JWS verificatio
 The solution is to simply delete the associated accounts file and
 re-run the affected service(s).
 
-```
+```shell
 # Find the accounts folder for the certificate
 systemctl cat acme-example.com.service | grep -Po 'accounts/[^:]*'
 export accountdir="$(!!)"
diff --git a/nixos/modules/services/audio/wyoming/faster-whisper.nix b/nixos/modules/services/audio/wyoming/faster-whisper.nix
index eda409f1f8004..dd7f62744cd02 100644
--- a/nixos/modules/services/audio/wyoming/faster-whisper.nix
+++ b/nixos/modules/services/audio/wyoming/faster-whisper.nix
@@ -121,6 +121,7 @@ in
   in mkIf (cfg.servers != {}) {
     systemd.services = mapAttrs' (server: options:
       nameValuePair "wyoming-faster-whisper-${server}" {
+        inherit (options) enable;
         description = "Wyoming faster-whisper server instance ${server}";
         after = [
           "network-online.target"
diff --git a/nixos/modules/services/audio/wyoming/piper.nix b/nixos/modules/services/audio/wyoming/piper.nix
index 698828aa6cbaf..2828fdf078921 100644
--- a/nixos/modules/services/audio/wyoming/piper.nix
+++ b/nixos/modules/services/audio/wyoming/piper.nix
@@ -116,6 +116,7 @@ in
   in mkIf (cfg.servers != {}) {
     systemd.services = mapAttrs' (server: options:
       nameValuePair "wyoming-piper-${server}" {
+        inherit (options) enable;
         description = "Wyoming Piper server instance ${server}";
         after = [
           "network-online.target"
diff --git a/nixos/modules/services/backup/borgbackup.nix b/nixos/modules/services/backup/borgbackup.nix
index 039a5f227ac41..393fe83f493f5 100644
--- a/nixos/modules/services/backup/borgbackup.nix
+++ b/nixos/modules/services/backup/borgbackup.nix
@@ -602,53 +602,56 @@ in {
           };
 
           extraArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for all {command}`borg` calls the
               service has. Handle with care.
             '';
-            default = "";
-            example = "--remote-path=/path/to/borg";
+            default = [ ];
+            example = [ "--remote-path=/path/to/borg" ];
           };
 
           extraInitArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for {command}`borg init`.
               Can also be set at runtime using `$extraInitArgs`.
             '';
-            default = "";
-            example = "--append-only";
+            default = [ ];
+            example = [ "--append-only" ];
           };
 
           extraCreateArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for {command}`borg create`.
               Can also be set at runtime using `$extraCreateArgs`.
             '';
-            default = "";
-            example = "--stats --checkpoint-interval 600";
+            default = [ ];
+            example = [
+              "--stats"
+              "--checkpoint-interval 600"
+            ];
           };
 
           extraPruneArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for {command}`borg prune`.
               Can also be set at runtime using `$extraPruneArgs`.
             '';
-            default = "";
-            example = "--save-space";
+            default = [ ];
+            example = [ "--save-space" ];
           };
 
           extraCompactArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for {command}`borg compact`.
               Can also be set at runtime using `$extraCompactArgs`.
             '';
-            default = "";
-            example = "--cleanup-commits";
+            default = [ ];
+            example = [ "--cleanup-commits" ];
           };
         };
       }
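
The `extra*Args` options above now use `coercedTo (listOf str) escapeShellArgs str`, so lists are accepted (and shell-escaped) while existing string values keep working. A minimal sketch with a hypothetical job name and paths:

```nix
{
  services.borgbackup.jobs."home-dirs" = {
    paths = [ "/home" ];
    repo = "/var/backup/home-dirs";   # hypothetical repository path
    encryption.mode = "none";
    startAt = "daily";
    # New list form; each element is shell-escaped before being passed to borg.
    extraCreateArgs = [ "--stats" "--checkpoint-interval" "600" ];
    extraPruneArgs = [ "--save-space" ];
  };
}
```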
diff --git a/nixos/modules/services/backup/btrbk.nix b/nixos/modules/services/backup/btrbk.nix
index 1e90ef54d33f9..364b77b6a21c1 100644
--- a/nixos/modules/services/backup/btrbk.nix
+++ b/nixos/modules/services/backup/btrbk.nix
@@ -6,14 +6,17 @@ let
     concatMapStringsSep
     concatStringsSep
     filterAttrs
+    flatten
+    getAttr
     isAttrs
     literalExpression
     mapAttrs'
     mapAttrsToList
     mkIf
     mkOption
+    optional
     optionalString
-    sort
+    sortOn
     types
     ;
 
@@ -37,7 +40,7 @@ let
   genConfig = set:
     let
       pairs = mapAttrsToList (name: value: { inherit name value; }) set;
-      sortedPairs = sort (a: b: prioOf a < prioOf b) pairs;
+      sortedPairs = sortOn prioOf pairs;
     in
       concatMap genPair sortedPairs;
   genSection = sec: secName: value:
@@ -84,6 +87,18 @@ let
     '';
   };
 
+  streamCompressMap = {
+    gzip = pkgs.gzip;
+    pigz = pkgs.pigz;
+    bzip2 = pkgs.bzip2;
+    pbzip2 = pkgs.pbzip2;
+    bzip3 = pkgs.bzip3;
+    xz = pkgs.xz;
+    lzo = pkgs.lzo;
+    lz4 = pkgs.lz4;
+    zstd = pkgs.zstd;
+  };
+
   cfg = config.services.btrbk;
   sshEnabled = cfg.sshAccess != [ ];
   serviceEnabled = cfg.instances != { };
@@ -94,7 +109,14 @@ in
   options = {
     services.btrbk = {
       extraPackages = mkOption {
-        description = lib.mdDoc "Extra packages for btrbk, like compression utilities for `stream_compress`";
+        description = lib.mdDoc ''
+          Extra packages for btrbk, like compression utilities for `stream_compress`.
+
+          **Note**: This option will be deprecated in future releases.
+          Required compression programs are provided to btrbk automatically,
+          depending on the compression method configured in the
+          `services.btrbk.instances.<name>.settings` option.
+        '';
         type = types.listOf types.package;
         default = [ ];
         example = literalExpression "[ pkgs.xz ]";
@@ -124,7 +146,19 @@ in
                   '';
                 };
                 settings = mkOption {
-                  type = let t = types.attrsOf (types.either types.str (t // { description = "instances of this type recursively"; })); in t;
+                  type = types.submodule {
+                    freeformType = let t = types.attrsOf (types.either types.str (t // { description = "instances of this type recursively"; })); in t;
+                    options = {
+                      stream_compress = mkOption {
+                        description = lib.mdDoc ''
+                          Compress the btrfs send stream before transferring it from/to remote locations using a
+                          compression command.
+                        '';
+                        type = types.enum ["gzip" "pigz" "bzip2" "pbzip2" "bzip3" "xz" "lzo" "lz4" "zstd" "no"];
+                        default = "no";
+                      };
+                    };
+                  };
                   default = { };
                   example = {
                     snapshot_preserve_min = "2d";
@@ -169,6 +203,11 @@ in
 
   };
   config = mkIf (sshEnabled || serviceEnabled) {
+
+    warnings = optional (cfg.extraPackages != []) ''
+      extraPackages option will be deprecated in future releases. Programs required for compression are now automatically selected depending on services.btrbk.instances.<name>.settings.stream_compress option.
+    '';
+
     environment.systemPackages = [ pkgs.btrbk ] ++ cfg.extraPackages;
 
     security.sudo.extraRules = mkIf (sudo_doas == "sudo") [ sudoRule ];
@@ -232,12 +271,15 @@ in
       cfg.instances;
     systemd.services = mapAttrs'
       (
-        name: _: {
+        name: instance: {
           name = "btrbk-${name}";
           value = {
             description = "Takes BTRFS snapshots and maintains retention policies.";
             unitConfig.Documentation = "man:btrbk(1)";
-            path = [ "/run/wrappers" ] ++ cfg.extraPackages;
+            path = [ "/run/wrappers" ]
+              ++ cfg.extraPackages
+              ++ optional (instance.settings.stream_compress != "no")
+                (getAttr instance.settings.stream_compress streamCompressMap);
             serviceConfig = {
               User = "btrbk";
               Group = "btrbk";
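
With the new `stream_compress` setting the matching compressor is placed on the service's `path` automatically, so `extraPackages` is no longer needed for that. A sketch with hypothetical volume and target names:

```nix
{
  services.btrbk.instances."remote-backup" = {
    onCalendar = "daily";
    settings = {
      stream_compress = "zstd";     # pkgs.zstd is added to the unit path automatically
      volume."/mnt/btr_pool" = {    # hypothetical volume layout
        subvolume = "home";
        target = "ssh://backup.example.com/mnt/backups";
      };
    };
  };
}
```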
diff --git a/nixos/modules/services/cluster/kubernetes/flannel.nix b/nixos/modules/services/cluster/kubernetes/flannel.nix
index 11c5adc6a8859..dca8996df0831 100644
--- a/nixos/modules/services/cluster/kubernetes/flannel.nix
+++ b/nixos/modules/services/cluster/kubernetes/flannel.nix
@@ -13,6 +13,13 @@ in
   ###### interface
   options.services.kubernetes.flannel = {
     enable = mkEnableOption (lib.mdDoc "flannel networking");
+
+    openFirewallPorts = mkOption {
+      description = lib.mdDoc ''
+        Whether to open the Flannel UDP ports in the firewall on all interfaces.'';
+      type = types.bool;
+      default = true;
+    };
   };
 
   ###### implementation
@@ -38,7 +45,7 @@ in
     };
 
     networking = {
-      firewall.allowedUDPPorts = [
+      firewall.allowedUDPPorts = mkIf cfg.openFirewallPorts [
         8285  # flannel udp
         8472  # flannel vxlan
       ];
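
Since the firewall rule is now gated behind `openFirewallPorts` (default `true`), a cluster that manages its own firewall can opt out, e.g.:

```nix
{
  services.kubernetes.flannel.enable = true;
  # Keep 8285/udp and 8472/udp closed here and open them selectively elsewhere.
  services.kubernetes.flannel.openFirewallPorts = false;
}
```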
diff --git a/nixos/modules/services/cluster/spark/default.nix b/nixos/modules/services/cluster/spark/default.nix
index 2e3914a734bea..b3e1ac399ae9f 100644
--- a/nixos/modules/services/cluster/spark/default.nix
+++ b/nixos/modules/services/cluster/spark/default.nix
@@ -69,8 +69,8 @@ with lib;
       confDir = mkOption {
         type = types.path;
         description = lib.mdDoc "Spark configuration directory. Spark will use the configuration files (spark-defaults.conf, spark-env.sh, log4j.properties, etc) from this directory.";
-        default = "${cfg.package}/lib/${cfg.package.untarDir}/conf";
-        defaultText = literalExpression ''"''${package}/lib/''${package.untarDir}/conf"'';
+        default = "${cfg.package}/conf";
+        defaultText = literalExpression ''"''${package}/conf"'';
       };
       logDir = mkOption {
         type = types.path;
@@ -111,9 +111,9 @@ with lib;
             Type = "forking";
             User = "spark";
             Group = "spark";
-            WorkingDirectory = "${cfg.package}/lib/${cfg.package.untarDir}";
-            ExecStart = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/start-master.sh";
-            ExecStop  = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/stop-master.sh";
+            WorkingDirectory = "${cfg.package}/";
+            ExecStart = "${cfg.package}/sbin/start-master.sh";
+            ExecStop  = "${cfg.package}/sbin/stop-master.sh";
             TimeoutSec = 300;
             StartLimitBurst=10;
             Restart = "always";
@@ -134,9 +134,9 @@ with lib;
           serviceConfig = {
             Type = "forking";
             User = "spark";
-            WorkingDirectory = "${cfg.package}/lib/${cfg.package.untarDir}";
-            ExecStart = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/start-worker.sh spark://${cfg.worker.master}";
-            ExecStop  = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/stop-worker.sh";
+            WorkingDirectory = "${cfg.package}/";
+            ExecStart = "${cfg.package}/sbin/start-worker.sh spark://${cfg.worker.master}";
+            ExecStop  = "${cfg.package}/sbin/stop-worker.sh";
             TimeoutSec = 300;
             StartLimitBurst=10;
             Restart = "always";
diff --git a/nixos/modules/services/continuous-integration/buildbot/worker.nix b/nixos/modules/services/continuous-integration/buildbot/worker.nix
index b906788209b19..2a836c24dda36 100644
--- a/nixos/modules/services/continuous-integration/buildbot/worker.nix
+++ b/nixos/modules/services/continuous-integration/buildbot/worker.nix
@@ -128,9 +128,7 @@ in {
         '';
       };
 
-      package = mkPackageOption pkgs "python3Packages.buildbot-worker" {
-        example = "python2Packages.buildbot-worker";
-      };
+      package = mkPackageOption pkgs "buildbot-worker" { };
 
       packages = mkOption {
         default = with pkgs; [ git ];
diff --git a/nixos/modules/services/databases/postgresql.md b/nixos/modules/services/databases/postgresql.md
index e5e0b7efec29a..7d141f12b5dea 100644
--- a/nixos/modules/services/databases/postgresql.md
+++ b/nixos/modules/services/databases/postgresql.md
@@ -258,7 +258,7 @@ postgresql_15.pkgs.pg_partman        postgresql_15.pkgs.pgroonga
 To add plugins via NixOS configuration, set `services.postgresql.extraPlugins`:
 ```
 services.postgresql.package = pkgs.postgresql_12;
-services.postgresql.extraPlugins = with pkgs.postgresql_12.pkgs; [
+services.postgresql.extraPlugins = ps: with ps; [
   pg_repack
   postgis
 ];
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index 690f2d85a4c9a..d0058fd1948b0 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -18,7 +18,7 @@ let
     in
     if cfg.extraPlugins == []
       then base
-      else base.withPackages (_: cfg.extraPlugins);
+      else base.withPackages cfg.extraPlugins;
 
   toStr = value:
     if true == value then "yes"
@@ -391,12 +391,11 @@ in
       };
 
       extraPlugins = mkOption {
-        type = types.listOf types.path;
-        default = [];
-        example = literalExpression "with pkgs.postgresql_15.pkgs; [ postgis pg_repack ]";
+        type = with types; coercedTo (listOf path) (path: _ignorePg: path) (functionTo (listOf path));
+        default = _: [];
+        example = literalExpression "ps: with ps; [ postgis pg_repack ]";
         description = lib.mdDoc ''
-          List of PostgreSQL plugins. PostgreSQL version for each plugin should
-          match version for `services.postgresql.package` value.
+          List of PostgreSQL plugins.
         '';
       };
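
`extraPlugins` now takes a function over the chosen package's plugin set (the old list form is still coerced), which keeps plugins in sync with `services.postgresql.package`. A sketch:

```nix
{
  services.postgresql = {
    enable = true;
    package = pkgs.postgresql_15;
    # New function form: plugins come from the selected package's own plugin set.
    extraPlugins = ps: with ps; [ postgis pg_repack ];
  };
}
```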
 
diff --git a/nixos/modules/services/development/zammad.nix b/nixos/modules/services/development/zammad.nix
index 87aceddd6635c..c084d6541ad38 100644
--- a/nixos/modules/services/development/zammad.nix
+++ b/nixos/modules/services/development/zammad.nix
@@ -21,6 +21,7 @@ let
     NODE_ENV = "production";
     RAILS_SERVE_STATIC_FILES = "true";
     RAILS_LOG_TO_STDOUT = "true";
+    REDIS_URL = "redis://${cfg.redis.host}:${toString cfg.redis.port}";
   };
   databaseConfig = settingsFormat.generate "database.yml" cfg.database.settings;
 in
@@ -65,6 +66,36 @@ in
         description = lib.mdDoc "Websocket service port.";
       };
 
+      redis = {
+        createLocally = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Whether to create a local redis automatically.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "zammad";
+          description = lib.mdDoc ''
+            Name of the redis server. Only used if `createLocally` is set to true.
+          '';
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc ''
+            Redis server address.
+          '';
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 6379;
+          description = lib.mdDoc "Port of the redis server.";
+        };
+      };
+
       database = {
         type = mkOption {
           type = types.enum [ "PostgreSQL" "MySQL" ];
@@ -206,6 +237,10 @@ in
         assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
         message = "a password cannot be specified if services.zammad.database.createLocally is set to true";
       }
+      {
+        assertion = cfg.redis.createLocally -> cfg.redis.host == "localhost";
+        message = "the redis host must be localhost if services.zammad.redis.createLocally is set to true";
+      }
     ];
 
     services.mysql = optionalAttrs (cfg.database.createLocally && cfg.database.type == "MySQL") {
@@ -231,6 +266,13 @@ in
       ];
     };
 
+    services.redis = optionalAttrs cfg.redis.createLocally {
+      servers."${cfg.redis.name}" = {
+        enable = true;
+        port = cfg.redis.port;
+      };
+    };
+
     systemd.services.zammad-web = {
       inherit environment;
       serviceConfig = serviceConfig // {
@@ -240,6 +282,8 @@ in
       after = [
         "network.target"
         "postgresql.service"
+      ] ++ optionals cfg.redis.createLocally [
+        "redis-${cfg.redis.name}.service"
       ];
       requires = [
         "postgresql.service"
@@ -303,16 +347,15 @@ in
       script = "./script/websocket-server.rb -b ${cfg.host} -p ${toString cfg.websocketPort} start";
     };
 
-    systemd.services.zammad-scheduler = {
-      inherit environment;
-      serviceConfig = serviceConfig // { Type = "forking"; };
+    systemd.services.zammad-worker = {
+      inherit serviceConfig environment;
       after = [ "zammad-web.service" ];
       requires = [ "zammad-web.service" ];
-      description = "Zammad scheduler";
+      description = "Zammad background worker";
       wantedBy = [ "multi-user.target" ];
-      script = "./script/scheduler.rb start";
+      script = "./script/background-worker.rb start";
     };
   };
 
-  meta.maintainers = with lib.maintainers; [ garbas taeer ];
+  meta.maintainers = with lib.maintainers; [ taeer netali ];
 }
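
The module now provisions a local Redis server by default and exports `REDIS_URL` to the Zammad services. A sketch of pointing it at an existing Redis instead (hypothetical host and port):

```nix
{
  services.zammad = {
    enable = true;
    redis = {
      createLocally = false;                 # do not start a local redis server
      host = "redis.internal.example.org";   # hypothetical external host
      port = 6380;
    };
  };
}
```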
diff --git a/nixos/modules/services/display-managers/greetd.nix b/nixos/modules/services/display-managers/greetd.nix
index 779e141ca24bd..2212f97a9ffe2 100644
--- a/nixos/modules/services/display-managers/greetd.nix
+++ b/nixos/modules/services/display-managers/greetd.nix
@@ -4,7 +4,7 @@ with lib;
 let
   cfg = config.services.greetd;
   tty = "tty${toString cfg.vt}";
-  settingsFormat = pkgs.formats.toml {};
+  settingsFormat = pkgs.formats.toml { };
 in
 {
   options.services.greetd = {
@@ -27,7 +27,7 @@ in
       '';
     };
 
-    vt = mkOption  {
+    vt = mkOption {
       type = types.int;
       default = 1;
       description = lib.mdDoc ''
@@ -97,12 +97,18 @@ in
 
     systemd.defaultUnit = "graphical.target";
 
+    # Create directories potentially required by supported greeters
+    # See https://github.com/NixOS/nixpkgs/issues/248323
+    systemd.tmpfiles.rules = [
+      "d '/var/cache/tuigreet' - greeter greeter - -"
+    ];
+
     users.users.greeter = {
       isSystemUser = true;
       group = "greeter";
     };
 
-    users.groups.greeter = {};
+    users.groups.greeter = { };
   };
 
   meta.maintainers = with maintainers; [ queezle ];
diff --git a/nixos/modules/services/games/teeworlds.nix b/nixos/modules/services/games/teeworlds.nix
index ffef440330c4e..bd0df1ffca578 100644
--- a/nixos/modules/services/games/teeworlds.nix
+++ b/nixos/modules/services/games/teeworlds.nix
@@ -100,7 +100,7 @@ in
 
       serviceConfig = {
         DynamicUser = true;
-        ExecStart = "${pkgs.teeworlds}/bin/teeworlds_srv -f ${teeworldsConf}";
+        ExecStart = "${pkgs.teeworlds-server}/bin/teeworlds_srv -f ${teeworldsConf}";
 
         # Hardening
         CapabilityBoundingSet = false;
diff --git a/nixos/modules/services/hardware/power-profiles-daemon.nix b/nixos/modules/services/hardware/power-profiles-daemon.nix
index 101da01b4a712..1d84bf8ac937c 100644
--- a/nixos/modules/services/hardware/power-profiles-daemon.nix
+++ b/nixos/modules/services/hardware/power-profiles-daemon.nix
@@ -1,10 +1,7 @@
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
   cfg = config.services.power-profiles-daemon;
-  package = pkgs.power-profiles-daemon;
 in
 
 {
@@ -15,8 +12,8 @@ in
 
     services.power-profiles-daemon = {
 
-      enable = mkOption {
-        type = types.bool;
+      enable = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc ''
           Whether to enable power-profiles-daemon, a DBus daemon that allows
@@ -24,6 +21,8 @@ in
         '';
       };
 
+      package = lib.mkPackageOption pkgs "power-profiles-daemon" { };
+
     };
 
   };
@@ -31,7 +30,7 @@ in
 
   ###### implementation
 
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
 
     assertions = [
       { assertion = !config.services.tlp.enable;
@@ -42,13 +41,13 @@ in
       }
     ];
 
-    environment.systemPackages = [ package ];
+    environment.systemPackages = [ cfg.package ];
 
-    services.dbus.packages = [ package ];
+    services.dbus.packages = [ cfg.package ];
 
-    services.udev.packages = [ package ];
+    services.udev.packages = [ cfg.package ];
 
-    systemd.packages = [ package ];
+    systemd.packages = [ cfg.package ];
 
   };
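
With the daemon package now exposed through `mkPackageOption`, it can be overridden per host; a sketch (the override itself is hypothetical):

```nix
{
  services.power-profiles-daemon = {
    enable = true;
    # Hypothetical override; any derivation providing the daemon can be used here.
    package = pkgs.power-profiles-daemon.overrideAttrs (old: { doCheck = false; });
  };
}
```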
 
diff --git a/nixos/modules/services/home-automation/home-assistant.nix b/nixos/modules/services/home-automation/home-assistant.nix
index 6aa0ae9eba47a..2a6b07c6f1a6e 100644
--- a/nixos/modules/services/home-automation/home-assistant.nix
+++ b/nixos/modules/services/home-automation/home-assistant.nix
@@ -11,14 +11,12 @@ let
   # options shown in settings.
   # We post-process the result to add support for YAML functions, like secrets or includes, see e.g.
   # https://www.home-assistant.io/docs/configuration/secrets/
-  filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [ null ])) cfg.config or {};
+  filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [ null ])) (lib.recursiveUpdate customLovelaceModulesResources (cfg.config or {}));
   configFile = pkgs.runCommandLocal "configuration.yaml" { } ''
     cp ${format.generate "configuration.yaml" filteredConfig} $out
     sed -i -e "s/'\!\([a-z_]\+\) \(.*\)'/\!\1 \2/;s/^\!\!/\!/;" $out
   '';
-  lovelaceConfig = if (cfg.lovelaceConfig == null) then {}
-    else (lib.recursiveUpdate customLovelaceModulesResources cfg.lovelaceConfig);
-  lovelaceConfigFile = format.generate "ui-lovelace.yaml" lovelaceConfig;
+  lovelaceConfigFile = format.generate "ui-lovelace.yaml" cfg.lovelaceConfig;
 
   # Components advertised by the home-assistant package
   availableComponents = cfg.package.availableComponents;
@@ -77,7 +75,7 @@ let
   # Create parts of the lovelace config that reference lovelave modules as resources
   customLovelaceModulesResources = {
     lovelace.resources = map (card: {
-      url = "/local/nixos-lovelace-modules/${card.entrypoint or card.pname}.js?${card.version}";
+      url = "/local/nixos-lovelace-modules/${card.entrypoint or (card.pname + ".js")}?${card.version}";
       type = "module";
     }) cfg.customLovelaceModules;
   };
@@ -159,7 +157,7 @@ in {
       default = [];
       example = literalExpression ''
         with pkgs.home-assistant-custom-components; [
-          prometheus-sensor
+          prometheus_sensor
         ];
       '';
       description = lib.mdDoc ''
@@ -525,7 +523,6 @@ in {
           "bluetooth_tracker"
           "bthome"
           "default_config"
-          "eq3btsmart"
           "eufylife_ble"
           "esphome"
           "fjaraskupan"
diff --git a/nixos/modules/services/logging/vector.nix b/nixos/modules/services/logging/vector.nix
index 48f9eeb4ce8f0..9ccf8a4fa0610 100644
--- a/nixos/modules/services/logging/vector.nix
+++ b/nixos/modules/services/logging/vector.nix
@@ -51,13 +51,17 @@ in
         {
           ExecStart = "${getExe cfg.package} --config ${validateConfig conf}";
           DynamicUser = true;
-          Restart = "no";
+          Restart = "always";
           StateDirectory = "vector";
           ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
           AmbientCapabilities = "CAP_NET_BIND_SERVICE";
           # This group is required for accessing journald.
           SupplementaryGroups = mkIf cfg.journaldAccess "systemd-journal";
         };
+      unitConfig = {
+        StartLimitIntervalSec = 10;
+        StartLimitBurst = 5;
+      };
     };
   };
 }
diff --git a/nixos/modules/services/matrix/maubot.nix b/nixos/modules/services/matrix/maubot.nix
index 7d392c22983b4..bc96ca03b1fc7 100644
--- a/nixos/modules/services/matrix/maubot.nix
+++ b/nixos/modules/services/matrix/maubot.nix
@@ -42,7 +42,7 @@ let
       database = lib.last (lib.splitString "/" noSchema);
     };
 
-  postgresDBs = [
+  postgresDBs = builtins.filter isPostgresql [
     cfg.settings.database
     cfg.settings.crypto_database
     cfg.settings.plugin_databases.postgres
diff --git a/nixos/modules/services/misc/guix/default.nix b/nixos/modules/services/misc/guix/default.nix
index 00e84dc745541..7b468e7067cc2 100644
--- a/nixos/modules/services/misc/guix/default.nix
+++ b/nixos/modules/services/misc/guix/default.nix
@@ -265,7 +265,7 @@ in
         linkProfileToPath = acc: profile: location: let
           guixProfile = "${cfg.stateDir}/guix/profiles/per-user/\${USER}/${profile}";
           in acc + ''
-            [ -d "${guixProfile}" ] && ln -sf "${guixProfile}" "${location}"
+            [ -d "${guixProfile}" ] && [ -L "${location}" ] || ln -sf "${guixProfile}" "${location}"
           '';
 
         activationScript = lib.foldlAttrs linkProfileToPath "" guixUserProfiles;
diff --git a/nixos/modules/services/misc/redmine.nix b/nixos/modules/services/misc/redmine.nix
index e3941d2e29de4..b517170cda216 100644
--- a/nixos/modules/services/misc/redmine.nix
+++ b/nixos/modules/services/misc/redmine.nix
@@ -264,9 +264,12 @@ in
       { assertion = cfg.database.passwordFile != null || cfg.database.socket != null;
         message = "one of services.redmine.database.socket or services.redmine.database.passwordFile must be set";
       }
-      { assertion = cfg.database.createLocally -> cfg.database.user == cfg.user && cfg.database.user == cfg.database.name;
+      { assertion = cfg.database.createLocally -> cfg.database.user == cfg.user;
         message = "services.redmine.database.user must be set to ${cfg.user} if services.redmine.database.createLocally is set true";
       }
+      { assertion = pgsqlLocal -> cfg.database.user == cfg.database.name;
+        message = "services.redmine.database.user and services.redmine.database.name must be the same when using a local postgresql database";
+      }
       { assertion = cfg.database.createLocally -> cfg.database.socket != null;
         message = "services.redmine.database.socket must be set if services.redmine.database.createLocally is set to true";
       }
diff --git a/nixos/modules/services/misc/tandoor-recipes.nix b/nixos/modules/services/misc/tandoor-recipes.nix
index 2d7d29b2e7172..6c51a9bb85550 100644
--- a/nixos/modules/services/misc/tandoor-recipes.nix
+++ b/nixos/modules/services/misc/tandoor-recipes.nix
@@ -12,7 +12,7 @@ let
     DEBUG_TOOLBAR = "0";
     MEDIA_ROOT = "/var/lib/tandoor-recipes";
   } // optionalAttrs (config.time.timeZone != null) {
-    TIMEZONE = config.time.timeZone;
+    TZ = config.time.timeZone;
   } // (
     lib.mapAttrs (_: toString) cfg.extraConfig
   );
diff --git a/nixos/modules/services/monitoring/grafana.nix b/nixos/modules/services/monitoring/grafana.nix
index 62c50490ee99a..5ac010bf81ee8 100644
--- a/nixos/modules/services/monitoring/grafana.nix
+++ b/nixos/modules/services/monitoring/grafana.nix
@@ -74,7 +74,7 @@ let
     fi
   '';
   provisionConfDir = pkgs.runCommand "grafana-provisioning" { nativeBuildInputs = [ pkgs.xorg.lndir ]; } ''
-    mkdir -p $out/{datasources,dashboards,notifiers,alerting}
+    mkdir -p $out/{alerting,datasources,dashboards,notifiers,plugins}
     ${ln { src = datasourceFileOrDir;    dir = "datasources"; filename = "datasource"; }}
     ${ln { src = dashboardFileOrDir;     dir = "dashboards";  filename = "dashboard"; }}
     ${ln { src = notifierFileOrDir;      dir = "notifiers";   filename = "notifier"; }}
@@ -1831,7 +1831,7 @@ in
         set -o errexit -o pipefail -o nounset -o errtrace
         shopt -s inherit_errexit
 
-        exec ${cfg.package}/bin/grafana-server -homepath ${cfg.dataDir} -config ${configFile}
+        exec ${cfg.package}/bin/grafana server -homepath ${cfg.dataDir} -config ${configFile}
       '';
       serviceConfig = {
         WorkingDirectory = cfg.dataDir;
diff --git a/nixos/modules/services/monitoring/netdata.nix b/nixos/modules/services/monitoring/netdata.nix
index 78b12537e27fd..ec6aa56150397 100644
--- a/nixos/modules/services/monitoring/netdata.nix
+++ b/nixos/modules/services/monitoring/netdata.nix
@@ -198,6 +198,7 @@ in {
         }
       ];
 
+    services.netdata.configDir.".opt-out-from-anonymous-statistics" = mkIf (!cfg.enableAnalyticsReporting) (pkgs.writeText ".opt-out-from-anonymous-statistics" "");
     environment.etc."netdata/netdata.conf".source = configFile;
     environment.etc."netdata/conf.d".source = configDirectory;
 
diff --git a/nixos/modules/services/monitoring/prometheus/default.nix b/nixos/modules/services/monitoring/prometheus/default.nix
index 90ea56658b02d..e78cb4d01dc5b 100644
--- a/nixos/modules/services/monitoring/prometheus/default.nix
+++ b/nixos/modules/services/monitoring/prometheus/default.nix
@@ -41,12 +41,12 @@ let
   # This becomes the main config file for Prometheus
   promConfig = {
     global = filterValidPrometheus cfg.globalConfig;
-    rule_files = map (promtoolCheck "check rules" "rules") (cfg.ruleFiles ++ [
-      (pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules))
-    ]);
     scrape_configs = filterValidPrometheus cfg.scrapeConfigs;
     remote_write = filterValidPrometheus cfg.remoteWrite;
     remote_read = filterValidPrometheus cfg.remoteRead;
+    rule_files = optionals (!(cfg.enableAgentMode)) (map (promtoolCheck "check rules" "rules") (cfg.ruleFiles ++ [
+      (pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules))
+    ]));
     alerting = {
       inherit (cfg) alertmanagers;
     };
@@ -62,15 +62,20 @@ let
     promtoolCheck "check config ${lib.optionalString (cfg.checkConfig == "syntax-only") "--syntax-only"}" "prometheus.yml" yml;
 
   cmdlineArgs = cfg.extraFlags ++ [
-    "--storage.tsdb.path=${workingDir}/data/"
     "--config.file=${
       if cfg.enableReload
       then "/etc/prometheus/prometheus.yaml"
       else prometheusYml
     }"
     "--web.listen-address=${cfg.listenAddress}:${builtins.toString cfg.port}"
-    "--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity}"
-  ] ++ optional (cfg.webExternalUrl != null) "--web.external-url=${cfg.webExternalUrl}"
+  ] ++ (
+    if (cfg.enableAgentMode) then [
+      "--enable-feature=agent"
+    ] else [
+       "--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity }"
+       "--storage.tsdb.path=${workingDir}/data/"
+    ])
+    ++ optional (cfg.webExternalUrl != null) "--web.external-url=${cfg.webExternalUrl}"
     ++ optional (cfg.retentionTime != null) "--storage.tsdb.retention.time=${cfg.retentionTime}"
     ++ optional (cfg.webConfigFile != null) "--web.config.file=${cfg.webConfigFile}";
 
@@ -1612,6 +1617,8 @@ in
       '';
     };
 
+    enableAgentMode = mkEnableOption (lib.mdDoc "agent mode");
+
     configText = mkOption {
       type = types.nullOr types.lines;
       default = null;
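
Agent mode drops the local TSDB and rule-evaluation flags and adds `--enable-feature=agent`, so such a node typically only scrapes and forwards samples via `remote_write`. A sketch with a hypothetical endpoint:

```nix
{
  services.prometheus = {
    enable = true;
    enableAgentMode = true;   # new option: run Prometheus as an agent, no local rules/TSDB
    remoteWrite = [
      { url = "https://prometheus.example.org/api/v1/write"; }   # hypothetical receiver
    ];
    scrapeConfigs = [
      {
        job_name = "node";
        static_configs = [ { targets = [ "localhost:9100" ]; } ];
      }
    ];
  };
}
```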
diff --git a/nixos/modules/services/monitoring/ups.nix b/nixos/modules/services/monitoring/ups.nix
index efef2d777acd8..c9dda8a8c0930 100644
--- a/nixos/modules/services/monitoring/ups.nix
+++ b/nixos/modules/services/monitoring/ups.nix
@@ -6,9 +6,83 @@ with lib;
 
 let
   cfg = config.power.ups;
-in
+  defaultPort = 3493;
+
+  nutFormat = {
+
+    type = with lib.types; let
+
+      singleAtom = nullOr (oneOf [
+        bool
+        int
+        float
+        str
+      ]) // {
+        description = "atom (null, bool, int, float or string)";
+      };
+
+      in attrsOf (oneOf [
+        singleAtom
+        (listOf (nonEmptyListOf singleAtom))
+      ]);
+
+    generate = name: value:
+      let
+        normalizedValue =
+          lib.mapAttrs (key: val:
+            if lib.isList val
+            then forEach val (elem: if lib.isList elem then elem else [elem])
+            else
+              if val == null
+              then []
+              else [[val]]
+          ) value;
+
+        mkValueString = concatMapStringsSep " " (v:
+          let str = generators.mkValueStringDefault {} v;
+          in
+            # Quote the value if it has spaces and isn't already quoted.
+            if (hasInfix " " str) && !(hasPrefix "\"" str && hasSuffix "\"" str)
+            then "\"${str}\""
+            else str
+        );
+
+      in pkgs.writeText name (lib.generators.toKeyValue {
+        mkKeyValue = generators.mkKeyValueDefault { inherit mkValueString; } " ";
+        listsAsDuplicateKeys = true;
+      } normalizedValue);
+
+  };
+
+  installSecrets = source: target: secrets:
+    pkgs.writeShellScript "installSecrets.sh" ''
+      install -m0600 -D ${source} "${target}"
+      ${concatLines (forEach secrets (name: ''
+        ${pkgs.replace-secret}/bin/replace-secret \
+          '@${name}@' \
+          "$CREDENTIALS_DIRECTORY/${name}" \
+          "${target}"
+      ''))}
+      chmod u-w "${target}"
+    '';
+
+  upsmonConf = nutFormat.generate "upsmon.conf" cfg.upsmon.settings;
+
+  upsdUsers = pkgs.writeText "upsd.users" (let
+    # This looks like INI, but it's not quite because the
+    # 'upsmon' option lacks a '='. See: man upsd.users
+    userConfig = name: user: concatStringsSep "\n      " (concatLists [
+      [
+        "[${name}]"
+        "password = \"@upsdusers_password_${name}@\""
+      ]
+      (optional (user.upsmon != null) "upsmon ${user.upsmon}")
+      (forEach user.actions (action: "actions = ${action}"))
+      (forEach user.instcmds (instcmd: "instcmds = ${instcmd}"))
+    ]);
+  in concatStringsSep "\n\n" (mapAttrsToList userConfig cfg.users));
+
 
-let
   upsOptions = {name, config, ...}:
   {
     options = {
@@ -95,6 +169,213 @@ let
     };
   };
 
+  listenOptions = {
+    options = {
+      address = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Address of the interface for `upsd` to listen on.
+          See `man upsd.conf` for details.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = defaultPort;
+        description = lib.mdDoc ''
+          TCP port for `upsd` to listen on.
+          See `man upsd.conf` for details.
+        '';
+      };
+    };
+  };
+
+  upsdOptions = {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        defaultText = literalMD "`true` if `mode` is one of `standalone`, `netserver`";
+        description = mdDoc "Whether to enable `upsd`.";
+      };
+
+      listen = mkOption {
+        type = with types; listOf (submodule listenOptions);
+        default = [];
+        example = [
+          {
+            address = "192.168.50.1";
+          }
+          {
+            address = "::1";
+            port = 5923;
+          }
+        ];
+        description = lib.mdDoc ''
+          Address of the interface for `upsd` to listen on.
+          See `man upsd` for details.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Additional lines to add to `upsd.conf`.
+        '';
+      };
+    };
+
+    config = {
+      enable = mkDefault (elem cfg.mode [ "standalone" "netserver" ]);
+    };
+  };
+
+
+  monitorOptions = { name, config, ... }: {
+    options = {
+      system = mkOption {
+        type = types.str;
+        default = name;
+        description = lib.mdDoc ''
+          Identifier of the UPS to monitor, in this form: `<upsname>[@<hostname>[:<port>]]`
+          See `upsmon.conf` for details.
+        '';
+      };
+
+      powerValue = mkOption {
+        type = types.int;
+        default = 1;
+        description = lib.mdDoc ''
+          Number of power supplies that the UPS feeds on this system.
+          See `upsmon.conf` for details.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Username from `upsd.users` for accessing this UPS.
+          See `upsmon.conf` for details.
+        '';
+      };
+
+      passwordFile = mkOption {
+        type = types.str;
+        defaultText = literalMD "power.ups.users.\${user}.passwordFile";
+        description = lib.mdDoc ''
+          The full path to a file containing the password from
+          `upsd.users` for accessing this UPS. The password file
+          is read on service start.
+          See `upsmon.conf` for details.
+        '';
+      };
+
+      type = mkOption {
+        type = types.str;
+        default = "master";
+        description = lib.mdDoc ''
+          The relationship with `upsd`.
+          See `upsmon.conf` for details.
+        '';
+      };
+    };
+
+    config = {
+      passwordFile = mkDefault cfg.users.${config.user}.passwordFile;
+    };
+  };
+
+  upsmonOptions = {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        defaultText = literalMD "`true` if `mode` is one of `standalone`, `netserver`, `netclient`";
+        description = mdDoc "Whether to enable `upsmon`.";
+      };
+
+      monitor = mkOption {
+        type = with types; attrsOf (submodule monitorOptions);
+        default = {};
+        description = lib.mdDoc ''
+          Set of UPS to monitor. See `man upsmon.conf` for details.
+        '';
+      };
+
+      settings = mkOption {
+        type = nutFormat.type;
+        default = {};
+        defaultText = literalMD ''
+          {
+            MINSUPPLIES = 1;
+            RUN_AS_USER = "root";
+            NOTIFYCMD = "''${pkgs.nut}/bin/upssched";
+            SHUTDOWNCMD = "''${pkgs.systemd}/bin/shutdown now";
+          }
+        '';
+        description = mdDoc "Additional settings to add to `upsmon.conf`.";
+        example = literalMD ''
+          {
+            MINSUPPLIES = 2;
+            NOTIFYFLAG = [
+              [ "ONLINE" "SYSLOG+EXEC" ]
+              [ "ONBATT" "SYSLOG+EXEC" ]
+            ];
+          }
+        '';
+      };
+    };
+
+    config = {
+      enable = mkDefault (elem cfg.mode [ "standalone" "netserver" "netclient" ]);
+      settings = {
+        RUN_AS_USER = "root"; # TODO: replace 'root' by another username.
+        MINSUPPLIES = mkDefault 1;
+        NOTIFYCMD = mkDefault "${pkgs.nut}/bin/upssched";
+        SHUTDOWNCMD = mkDefault "${pkgs.systemd}/bin/shutdown now";
+        MONITOR = flip mapAttrsToList cfg.upsmon.monitor (name: monitor: with monitor; [ system powerValue user "\"@upsmon_password_${name}@\"" type ]);
+      };
+    };
+  };
+
+  userOptions = {
+    options = {
+      passwordFile = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The full path to a file that contains the user's (clear text)
+          password. The password file is read on service start.
+        '';
+      };
+
+      actions = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc ''
+          Allow the user to do certain things with upsd.
+          See `man upsd.users` for details.
+        '';
+      };
+
+      instcmds = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc ''
+          Let the user initiate specific instant commands. Use "ALL" to grant all commands automatically. For the full list of what your UPS supports, use "upscmd -l".
+          See `man upsd.users` for details.
+        '';
+      };
+
+      upsmon = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Add the necessary actions for a upsmon process to work.
+          See `man upsd.users` for details.
+        '';
+      };
+    };
+  };
+
 in
 
 
@@ -103,19 +384,14 @@ in
     # powerManagement.powerDownCommands
 
     power.ups = {
-      enable = mkOption {
-        default = false;
-        type = with types; bool;
-        description = lib.mdDoc ''
-          Enables support for Power Devices, such as Uninterruptible Power
-          Supplies, Power Distribution Units and Solar Controllers.
-        '';
-      };
+      enable = mkEnableOption (lib.mdDoc ''
+        Enables support for Power Devices, such as Uninterruptible Power
+        Supplies, Power Distribution Units and Solar Controllers.
+      '');
 
-      # This option is not used yet.
       mode = mkOption {
         default = "standalone";
-        type = types.str;
+        type = types.enum [ "none" "standalone" "netserver" "netclient" ];
         description = lib.mdDoc ''
           The MODE determines which part of the NUT is to be started, and
           which configuration files must be modified.
@@ -148,6 +424,13 @@ in
         '';
       };
 
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for `upsd`.
+        '';
+      };
 
       maxStartDelay = mkOption {
         default = 45;
@@ -161,6 +444,22 @@ in
         '';
       };
 
+      upsmon = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Options for the `upsmon.conf` configuration file.
+        '';
+        type = types.submodule upsmonOptions;
+      };
+
+      upsd = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Options for the `upsd.conf` configuration file.
+        '';
+        type = types.submodule upsdOptions;
+      };
+
       ups = mkOption {
         default = {};
         # see nut/etc/ups.conf.sample
@@ -172,46 +471,95 @@ in
         type = with types; attrsOf (submodule upsOptions);
       };
 
+      users = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Users that can access upsd. See `man upsd.users`.
+        '';
+        type = with types; attrsOf (submodule userOptions);
+      };
+
     };
   };
 
   config = mkIf cfg.enable {
 
+    assertions = [
+      (let
+        totalPowerValue = foldl' add 0 (map (monitor: monitor.powerValue) (attrValues cfg.upsmon.monitor));
+        minSupplies = cfg.upsmon.settings.MINSUPPLIES;
+      in mkIf cfg.upsmon.enable {
+        assertion = totalPowerValue >= minSupplies;
+        message = ''
+          `power.ups.upsmon`: Total configured power value (${toString totalPowerValue}) must be at least MINSUPPLIES (${toString minSupplies}).
+        '';
+      })
+    ];
+
     environment.systemPackages = [ pkgs.nut ];
 
-    systemd.services.upsmon = {
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts =
+        if cfg.upsd.listen == []
+        then [ defaultPort ]
+        else unique (forEach cfg.upsd.listen (listen: listen.port));
+    };
+
+    systemd.services.upsmon = let
+      secrets = mapAttrsToList (name: monitor: "upsmon_password_${name}") cfg.upsmon.monitor;
+      createUpsmonConf = installSecrets upsmonConf "/run/nut/upsmon.conf" secrets;
+    in {
+      enable = cfg.upsmon.enable;
       description = "Uninterruptible Power Supplies (Monitor)";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
-      serviceConfig.Type = "forking";
-      script = "${pkgs.nut}/sbin/upsmon";
-      environment.NUT_CONFPATH = "/etc/nut/";
-      environment.NUT_STATEPATH = "/var/lib/nut/";
+      serviceConfig = {
+        Type = "forking";
+        ExecStartPre = "${createUpsmonConf}";
+        ExecStart = "${pkgs.nut}/sbin/upsmon";
+        ExecReload = "${pkgs.nut}/sbin/upsmon -c reload";
+        LoadCredential = mapAttrsToList (name: monitor: "upsmon_password_${name}:${monitor.passwordFile}") cfg.upsmon.monitor;
+      };
+      environment.NUT_CONFPATH = "/etc/nut";
+      environment.NUT_STATEPATH = "/var/lib/nut";
     };
 
-    systemd.services.upsd = {
+    systemd.services.upsd = let
+      secrets = mapAttrsToList (name: user: "upsdusers_password_${name}") cfg.users;
+      createUpsdUsers = installSecrets upsdUsers "/run/nut/upsd.users" secrets;
+    in {
+      enable = cfg.upsd.enable;
       description = "Uninterruptible Power Supplies (Daemon)";
       after = [ "network.target" "upsmon.service" ];
       wantedBy = [ "multi-user.target" ];
-      serviceConfig.Type = "forking";
-      # TODO: replace 'root' by another username.
-      script = "${pkgs.nut}/sbin/upsd -u root";
-      environment.NUT_CONFPATH = "/etc/nut/";
-      environment.NUT_STATEPATH = "/var/lib/nut/";
+      serviceConfig = {
+        Type = "forking";
+        ExecStartPre = "${createUpsdUsers}";
+        # TODO: replace 'root' by another username.
+        ExecStart = "${pkgs.nut}/sbin/upsd -u root";
+        ExecReload = "${pkgs.nut}/sbin/upsd -c reload";
+        LoadCredential = mapAttrsToList (name: user: "upsdusers_password_${name}:${user.passwordFile}") cfg.users;
+      };
+      environment.NUT_CONFPATH = "/etc/nut";
+      environment.NUT_STATEPATH = "/var/lib/nut";
+      restartTriggers = [
+        config.environment.etc."nut/upsd.conf".source
+      ];
     };
 
     systemd.services.upsdrv = {
+      enable = cfg.upsd.enable;
       description = "Uninterruptible Power Supplies (Register all UPS)";
       after = [ "upsd.service" ];
       wantedBy = [ "multi-user.target" ];
-      # TODO: replace 'root' by another username.
-      script = "${pkgs.nut}/bin/upsdrvctl -u root start";
       serviceConfig = {
         Type = "oneshot";
         RemainAfterExit = true;
+        # TODO: replace 'root' by another username.
+        ExecStart = "${pkgs.nut}/bin/upsdrvctl -u root start";
       };
-      environment.NUT_CONFPATH = "/etc/nut/";
-      environment.NUT_STATEPATH = "/var/lib/nut/";
+      environment.NUT_CONFPATH = "/etc/nut";
+      environment.NUT_STATEPATH = "/var/lib/nut";
     };
 
     environment.etc = {
@@ -223,24 +571,23 @@ in
         ''
           maxstartdelay = ${toString cfg.maxStartDelay}
 
-          ${flip concatStringsSep (forEach (attrValues cfg.ups) (ups: ups.summary)) "
-
-          "}
+          ${concatStringsSep "\n\n" (forEach (attrValues cfg.ups) (ups: ups.summary))}
+        '';
+      "nut/upsd.conf".source = pkgs.writeText "upsd.conf"
+        ''
+          ${concatStringsSep "\n" (forEach cfg.upsd.listen (listen: "LISTEN ${listen.address} ${toString listen.port}"))}
+          ${cfg.upsd.extraConfig}
         '';
       "nut/upssched.conf".source = cfg.schedulerRules;
-      # These file are containing private information and thus should not
-      # be stored inside the Nix store.
-      /*
-      "nut/upsd.conf".source = "";
-      "nut/upsd.users".source = "";
-      "nut/upsmon.conf".source = "";
-      */
+      "nut/upsd.users".source = "/run/nut/upsd.users";
+      "nut/upsmon.conf".source = "/run/nut/upsmon.conf";
     };
 
     power.ups.schedulerRules = mkDefault "${pkgs.nut}/etc/upssched.conf.sample";
 
     systemd.tmpfiles.rules = [
       "d /var/state/ups -"
+      "d /var/lib/nut 700"
     ];
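For reference, a minimal sketch of how these options might be combined, based only on the option names referenced in this hunk (the top-level `enable` switch is assumed and the monitor entries take further attributes not shown here):

    power.ups = {
      enable = true;                                              # assumed top-level switch, not part of this hunk
      openFirewall = true;                                        # opens defaultPort or the configured upsd.listen ports
      users.upsmon.passwordFile = "/run/keys/nut-upsmon";         # rendered into /run/nut/upsd.users at start-up
      upsmon.monitor.main.passwordFile = "/run/keys/nut-upsmon";  # rendered into /run/nut/upsmon.conf at start-up
    };

The password files are handed to the services through systemd LoadCredential, so the rendered configuration never ends up in the Nix store.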
 
 
diff --git a/nixos/modules/services/networking/avahi-daemon.nix b/nixos/modules/services/networking/avahi-daemon.nix
index de51843ba6f9c..89b30996e8faa 100644
--- a/nixos/modules/services/networking/avahi-daemon.nix
+++ b/nixos/modules/services/networking/avahi-daemon.nix
@@ -42,6 +42,7 @@ in
 {
   imports = [
     (lib.mkRenamedOptionModule [ "services" "avahi" "interfaces" ] [ "services" "avahi" "allowInterfaces" ])
+    (lib.mkRenamedOptionModule [ "services" "avahi" "nssmdns" ] [ "services" "avahi" "nssmdns4" ])
   ];
 
   options.services.avahi = {
@@ -93,7 +94,7 @@ in
 
     ipv6 = mkOption {
       type = types.bool;
-      default = config.networking.enableIPv6;
+      default = false;
       defaultText = literalExpression "config.networking.enableIPv6";
       description = lib.mdDoc "Whether to use IPv6.";
     };
@@ -218,13 +219,28 @@ in
       };
     };
 
-    nssmdns = mkOption {
+    nssmdns4 = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable the mDNS NSS (Name Service Switch) plug-in for IPv4.
+        Enabling it allows applications to resolve names in the `.local`
+        domain by transparently querying the Avahi daemon.
+      '';
+    };
+
+    nssmdns6 = mkOption {
       type = types.bool;
       default = false;
       description = lib.mdDoc ''
-        Whether to enable the mDNS NSS (Name Service Switch) plug-in.
+        Whether to enable the mDNS NSS (Name Service Switch) plug-in for IPv6.
         Enabling it allows applications to resolve names in the `.local`
         domain by transparently querying the Avahi daemon.
+
+        ::: {.note}
+        Because most mDNS responders only register local IPv4 addresses,
+        most users will want to leave this option disabled to avoid long timeouts when applications first try to resolve a non-existent IPv6 address.
+        :::
       '';
     };
 
@@ -256,9 +272,18 @@ in
 
     users.groups.avahi = { };
 
-    system.nssModules = optional cfg.nssmdns pkgs.nssmdns;
-    system.nssDatabases.hosts = optionals cfg.nssmdns (mkMerge [
-      (mkBefore [ "mdns_minimal [NOTFOUND=return]" ]) # before resolve
+    system.nssModules = optional (cfg.nssmdns4 || cfg.nssmdns6) pkgs.nssmdns;
+    system.nssDatabases.hosts = let
+      mdnsMinimal = if (cfg.nssmdns4 && cfg.nssmdns6) then
+        "mdns_minimal"
+      else if (!cfg.nssmdns4 && cfg.nssmdns6) then
+        "mdns6_minimal"
+      else if (cfg.nssmdns4 && !cfg.nssmdns6) then
+        "mdns4_minimal"
+      else
+        "";
+    in optionals (cfg.nssmdns4 || cfg.nssmdns6) (mkMerge [
+      (mkBefore [ "${mdnsMinimal} [NOTFOUND=return]" ]) # before resolve
       (mkAfter [ "mdns" ]) # after dns
     ]);
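A minimal sketch of the renamed options in use (`services.avahi.enable` is assumed; it is not part of this hunk):

    services.avahi = {
      enable = true;
      nssmdns4 = true;   # adds mdns4_minimal to the hosts NSS database
      # nssmdns6 is left at its default (false) to avoid the IPv6 timeout described above
    };

Existing configurations that still set `services.avahi.nssmdns` keep working through the mkRenamedOptionModule added above.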
 
diff --git a/nixos/modules/services/networking/ddclient.nix b/nixos/modules/services/networking/ddclient.nix
index 8f4fb0bc78d4e..a67f0c5de9ba3 100644
--- a/nixos/modules/services/networking/ddclient.nix
+++ b/nixos/modules/services/networking/ddclient.nix
@@ -126,7 +126,7 @@ with lib;
         default = "dyndns2";
         type = str;
         description = lib.mdDoc ''
-          Protocol to use with dynamic DNS provider (see https://sourceforge.net/p/ddclient/wiki/protocols).
+          Protocol to use with dynamic DNS provider (see https://ddclient.net/protocols.html ).
         '';
       };
 
diff --git a/nixos/modules/services/networking/ejabberd.nix b/nixos/modules/services/networking/ejabberd.nix
index b10a3d9f21df6..78af256f9c81b 100644
--- a/nixos/modules/services/networking/ejabberd.nix
+++ b/nixos/modules/services/networking/ejabberd.nix
@@ -120,6 +120,12 @@ in {
         if [ -z "$(ls -A '${cfg.spoolDir}')" ]; then
           touch "${cfg.spoolDir}/.firstRun"
         fi
+
+        if ! test -e ${cfg.spoolDir}/.erlang.cookie; then
+          touch ${cfg.spoolDir}/.erlang.cookie
+          chmod 600 ${cfg.spoolDir}/.erlang.cookie
+          dd if=/dev/random bs=16 count=1 | base64 > ${cfg.spoolDir}/.erlang.cookie
+        fi
       '';
 
       postStart = ''
diff --git a/nixos/modules/services/networking/harmonia.nix b/nixos/modules/services/networking/harmonia.nix
index beaa7d00b6ce8..d0f4a8a6e6333 100644
--- a/nixos/modules/services/networking/harmonia.nix
+++ b/nixos/modules/services/networking/harmonia.nix
@@ -29,6 +29,11 @@ in
 
   config = lib.mkIf cfg.enable {
     nix.settings.extra-allowed-users = [ "harmonia" ];
+    users.users.harmonia = {
+      isSystemUser = true;
+      group = "harmonia";
+    };
+    users.groups.harmonia = { };
 
     systemd.services.harmonia = {
       description = "harmonia binary cache service";
@@ -50,7 +55,6 @@ in
         ExecStart = lib.getExe cfg.package;
         User = "harmonia";
         Group = "harmonia";
-        DynamicUser = true;
         PrivateUsers = true;
         DeviceAllow = [ "" ];
         UMask = "0066";
diff --git a/nixos/modules/services/networking/iwd.nix b/nixos/modules/services/networking/iwd.nix
index b74f5d0bec9b8..d46c1a69a6197 100644
--- a/nixos/modules/services/networking/iwd.nix
+++ b/nixos/modules/services/networking/iwd.nix
@@ -64,8 +64,10 @@ in
     };
 
     systemd.services.iwd = {
+      path = [ config.networking.resolvconf.package ];
       wantedBy = [ "multi-user.target" ];
       restartTriggers = [ configFile ];
+      serviceConfig.ReadWritePaths = "-/etc/resolv.conf";
     };
   };
 
diff --git a/nixos/modules/services/networking/jigasi.nix b/nixos/modules/services/networking/jigasi.nix
new file mode 100644
index 0000000000000..8d2d25c6edfc6
--- /dev/null
+++ b/nixos/modules/services/networking/jigasi.nix
@@ -0,0 +1,237 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.jigasi;
+  homeDirName = "jigasi-home";
+  stateDir = "/tmp";
+  sipCommunicatorPropertiesFile = "${stateDir}/${homeDirName}/sip-communicator.properties";
+  sipCommunicatorPropertiesFileUnsubstituted = "${pkgs.jigasi}/etc/jitsi/jigasi/sip-communicator.properties";
+in
+{
+  options.services.jigasi = with types; {
+    enable = mkEnableOption "Jitsi Gateway to SIP - component of Jitsi Meet";
+
+    xmppHost = mkOption {
+      type = str;
+      example = "localhost";
+      description = ''
+        Hostname of the XMPP server to connect to.
+      '';
+    };
+
+    xmppDomain = mkOption {
+      type = nullOr str;
+      example = "meet.example.org";
+      description = ''
+        Domain name of the XMPP server to which to connect as a component.
+
+        If null, <option>xmppHost</option> is used.
+      '';
+    };
+
+    componentPasswordFile = mkOption {
+      type = str;
+      example = "/run/keys/jigasi-component";
+      description = ''
+        Path to file containing component secret.
+      '';
+    };
+
+    userName = mkOption {
+      type = str;
+      default = "callcontrol";
+      description = ''
+        User part of the JID for XMPP user connection.
+      '';
+    };
+
+    userDomain = mkOption {
+      type = str;
+      example = "internal.meet.example.org";
+      description = ''
+        Domain part of the JID for XMPP user connection.
+      '';
+    };
+
+    userPasswordFile = mkOption {
+      type = str;
+      example = "/run/keys/jigasi-user";
+      description = ''
+        Path to file containing password for XMPP user connection.
+      '';
+    };
+
+    bridgeMuc = mkOption {
+      type = str;
+      example = "jigasibrewery@internal.meet.example.org";
+      description = ''
+        JID of the internal MUC used to communicate with Videobridges.
+      '';
+    };
+
+    defaultJvbRoomName = mkOption {
+      type = str;
+      default = "";
+      example = "siptest";
+      description = ''
+        Name of the default JVB room that will be joined if no special header is included in the SIP invite.
+      '';
+    };
+
+    environmentFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = ''
+        File containing environment variables to be passed to the jigasi service,
+        in which secret tokens can be specified securely by defining values for
+        <literal>JIGASI_SIPUSER</literal>,
+        <literal>JIGASI_SIPPWD</literal>,
+        <literal>JIGASI_SIPSERVER</literal> and
+        <literal>JIGASI_SIPPORT</literal>.
+      '';
+    };
+
+    config = mkOption {
+      type = attrsOf str;
+      default = { };
+      example = literalExample ''
+        {
+          "org.jitsi.jigasi.auth.URL" = "XMPP:jitsi-meet.example.com";
+        }
+      '';
+      description = ''
+        Contents of the <filename>sip-communicator.properties</filename> configuration file for jigasi.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.jicofo.config = {
+      "org.jitsi.jicofo.jigasi.BREWERY" = "${cfg.bridgeMuc}";
+    };
+
+    services.jigasi.config = mapAttrs (_: v: mkDefault v) {
+      "org.jitsi.jigasi.BRIDGE_MUC" = cfg.bridgeMuc;
+    };
+
+    users.groups.jitsi-meet = {};
+
+    systemd.services.jigasi = let
+      jigasiProps = {
+        "-Dnet.java.sip.communicator.SC_HOME_DIR_LOCATION" = "${stateDir}";
+        "-Dnet.java.sip.communicator.SC_HOME_DIR_NAME" = "${homeDirName}";
+        "-Djava.util.logging.config.file" = "${pkgs.jigasi}/etc/jitsi/jigasi/logging.properties";
+      };
+    in
+    {
+      description = "Jitsi Gateway to SIP";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      preStart = ''
+        [ -f "${sipCommunicatorPropertiesFile}" ] && rm -f "${sipCommunicatorPropertiesFile}"
+        mkdir -p "$(dirname ${sipCommunicatorPropertiesFile})"
+        temp="${sipCommunicatorPropertiesFile}.unsubstituted"
+
+        export DOMAIN_BASE="${cfg.xmppDomain}"
+        export JIGASI_XMPP_PASSWORD=$(cat "${cfg.userPasswordFile}")
+        export JIGASI_DEFAULT_JVB_ROOM_NAME="${cfg.defaultJvbRoomName}"
+
+        # encode the credentials to base64
+        export JIGASI_SIPPWD=$(echo -n "$JIGASI_SIPPWD" | base64 -w 0)
+        export JIGASI_XMPP_PASSWORD_BASE64=$(cat "${cfg.userPasswordFile}" | base64 -w 0)
+
+        cp "${sipCommunicatorPropertiesFileUnsubstituted}" "$temp"
+        chmod 644 "$temp"
+        cat <<EOF >>"$temp"
+        net.java.sip.communicator.impl.protocol.sip.acc1403273890647.SERVER_PORT=$JIGASI_SIPPORT
+        net.java.sip.communicator.impl.protocol.sip.acc1403273890647.PREFERRED_TRANSPORT=udp
+        EOF
+        chmod 444 "$temp"
+
+        # Replace <<$VAR_NAME>> from example config to $VAR_NAME for environment substitution
+        sed -i -E \
+          's/<<([^>]+)>>/\$\1/g' \
+          "$temp"
+
+        sed -i \
+          's|\(net\.java\.sip\.communicator\.impl\.protocol\.jabber\.acc-xmpp-1\.PASSWORD=\).*|\1\$JIGASI_XMPP_PASSWORD_BASE64|g' \
+          "$temp"
+
+        sed -i \
+          's|\(#\)\(org.jitsi.jigasi.DEFAULT_JVB_ROOM_NAME=\).*|\2\$JIGASI_DEFAULT_JVB_ROOM_NAME|g' \
+          "$temp"
+
+        ${pkgs.envsubst}/bin/envsubst \
+          -o "${sipCommunicatorPropertiesFile}" \
+          -i "$temp"
+
+        # Set the brewery room name
+        sed -i \
+          's|\(net\.java\.sip\.communicator\.impl\.protocol\.jabber\.acc-xmpp-1\.BREWERY=\).*|\1${cfg.bridgeMuc}|g' \
+          "${sipCommunicatorPropertiesFile}"
+        sed -i \
+          's|\(org\.jitsi\.jigasi\.ALLOWED_JID=\).*|\1${cfg.bridgeMuc}|g' \
+          "${sipCommunicatorPropertiesFile}"
+
+
+        # Disable certificate verification for self-signed certificates
+        sed -i \
+          's|\(# \)\(net.java.sip.communicator.service.gui.ALWAYS_TRUST_MODE_ENABLED=true\)|\2|g' \
+          "${sipCommunicatorPropertiesFile}"
+      '';
+
+      restartTriggers = [
+        config.environment.etc."jitsi/jigasi/sip-communicator.properties".source
+      ];
+      environment.JAVA_SYS_PROPS = concatStringsSep " " (mapAttrsToList (k: v: "${k}=${toString v}") jigasiProps);
+
+      script = ''
+        ${pkgs.jigasi}/bin/jigasi \
+          --host="${cfg.xmppHost}" \
+          --domain="${if cfg.xmppDomain == null then cfg.xmppHost else cfg.xmppDomain}" \
+          --secret="$(cat ${cfg.componentPasswordFile})" \
+          --user_name="${cfg.userName}" \
+          --user_domain="${cfg.userDomain}" \
+          --user_password="$(cat ${cfg.userPasswordFile})" \
+          --configdir="${stateDir}" \
+          --configdirname="${homeDirName}"
+      '';
+
+      serviceConfig = {
+        Type = "exec";
+
+        DynamicUser = true;
+        User = "jigasi";
+        Group = "jitsi-meet";
+
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHostname = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        StateDirectory = baseNameOf stateDir;
+        EnvironmentFile = cfg.environmentFile;
+      };
+    };
+
+    environment.etc."jitsi/jigasi/sip-communicator.properties".source =
+      mkDefault "${sipCommunicatorPropertiesFile}";
+    environment.etc."jitsi/jigasi/logging.properties".source =
+      mkDefault "${stateDir}/logging.properties-journal";
+  };
+
+  meta.maintainers = lib.teams.jitsi.members;
+}
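A minimal sketch of a standalone jigasi configuration using only the options declared in this module (host names and key paths are placeholders):

    services.jigasi = {
      enable = true;
      xmppHost = "localhost";
      xmppDomain = "meet.example.org";
      userDomain = "auth.meet.example.org";
      userPasswordFile = "/run/keys/jigasi-user";
      componentPasswordFile = "/run/keys/jigasi-component";
      bridgeMuc = "jigasibrewery@internal.meet.example.org";
    };

When the jitsi-meet module further down is used with `jigasi.enable = true`, it fills in these options automatically.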
diff --git a/nixos/modules/services/networking/teamspeak3.nix b/nixos/modules/services/networking/teamspeak3.nix
index f09ef1a959ed4..ff41539a6d9b7 100644
--- a/nixos/modules/services/networking/teamspeak3.nix
+++ b/nixos/modules/services/networking/teamspeak3.nix
@@ -50,7 +50,7 @@ in
       };
 
       defaultVoicePort = mkOption {
-        type = types.int;
+        type = types.port;
         default = 9987;
         description = lib.mdDoc ''
           Default UDP port for clients to connect to virtual servers - used for first virtual server, subsequent ones will open on incrementing port numbers by default.
@@ -67,7 +67,7 @@ in
       };
 
       fileTransferPort = mkOption {
-        type = types.int;
+        type = types.port;
         default = 30033;
         description = lib.mdDoc ''
           TCP port opened for file transfers.
@@ -84,10 +84,26 @@ in
       };
 
       queryPort = mkOption {
-        type = types.int;
+        type = types.port;
         default = 10011;
         description = lib.mdDoc ''
-          TCP port opened for ServerQuery connections.
+          TCP port opened for ServerQuery connections using the raw telnet protocol.
+        '';
+      };
+
+      querySshPort = mkOption {
+        type = types.port;
+        default = 10022;
+        description = lib.mdDoc ''
+          TCP port opened for ServerQuery connections using the SSH protocol.
+        '';
+      };
+
+      queryHttpPort = mkOption {
+        type = types.port;
+        default = 10080;
+        description = lib.mdDoc ''
+          TCP port opened for ServerQuery connections using the HTTP protocol.
         '';
       };
 
@@ -128,7 +144,9 @@ in
     ];
 
     networking.firewall = mkIf cfg.openFirewall {
-      allowedTCPPorts = [ cfg.fileTransferPort ] ++ optionals (cfg.openFirewallServerQuery) [ cfg.queryPort (cfg.queryPort + 11) ];
+      allowedTCPPorts = [ cfg.fileTransferPort ] ++ (map (port:
+        mkIf cfg.openFirewallServerQuery port
+      ) [cfg.queryPort cfg.querySshPort cfg.queryHttpPort]);
       # subsequent vServers will use the incremented voice port, let's just open the next 10
       allowedUDPPortRanges = [ { from = cfg.defaultVoicePort; to = cfg.defaultVoicePort + 10; } ];
     };
@@ -141,13 +159,19 @@ in
       serviceConfig = {
         ExecStart = ''
           ${ts3}/bin/ts3server \
-            dbsqlpath=${ts3}/lib/teamspeak/sql/ logpath=${cfg.logPath} \
-            ${optionalString (cfg.voiceIP != null) "voice_ip=${cfg.voiceIP}"} \
+            dbsqlpath=${ts3}/lib/teamspeak/sql/ \
+            logpath=${cfg.logPath} \
+            license_accepted=1 \
             default_voice_port=${toString cfg.defaultVoicePort} \
-            ${optionalString (cfg.fileTransferIP != null) "filetransfer_ip=${cfg.fileTransferIP}"} \
             filetransfer_port=${toString cfg.fileTransferPort} \
+            query_port=${toString cfg.queryPort} \
+            query_ssh_port=${toString cfg.querySshPort} \
+            query_http_port=${toString cfg.queryHttpPort} \
+            ${optionalString (cfg.voiceIP != null) "voice_ip=${cfg.voiceIP}"} \
+            ${optionalString (cfg.fileTransferIP != null) "filetransfer_ip=${cfg.fileTransferIP}"} \
             ${optionalString (cfg.queryIP != null) "query_ip=${cfg.queryIP}"} \
-            query_port=${toString cfg.queryPort} license_accepted=1
+            ${optionalString (cfg.queryIP != null) "query_ssh_ip=${cfg.queryIP}"} \
+            ${optionalString (cfg.queryIP != null) "query_http_ip=${cfg.queryIP}"} \
         '';
         WorkingDirectory = cfg.dataDir;
         User = user;
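A sketch of how the new ServerQuery options fit together (`services.teamspeak3.enable` is assumed; it is not part of this hunk):

    services.teamspeak3 = {
      enable = true;
      openFirewall = true;              # opens fileTransferPort plus the voice port range
      openFirewallServerQuery = true;   # additionally opens queryPort, querySshPort and queryHttpPort
      querySshPort = 10122;
      queryHttpPort = 10180;
    };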
diff --git a/nixos/modules/services/search/hound.nix b/nixos/modules/services/search/hound.nix
index 539a322b431f6..d238b26a226b5 100644
--- a/nixos/modules/services/search/hound.nix
+++ b/nixos/modules/services/search/hound.nix
@@ -3,6 +3,12 @@ with lib;
 let
   cfg = config.services.hound;
 in {
+  imports = [
+    (lib.mkRemovedOptionModule [ "services" "hound" "extraGroups" ] "Use users.users.hound.extraGroups instead")
+  ];
+
+  meta.maintainers = with maintainers; [ SuperSandro2000 ];
+
   options = {
     services.hound = {
       enable = mkOption {
@@ -13,6 +19,8 @@ in {
         '';
       };
 
+      package = mkPackageOptionMD pkgs "hound" { };
+
       user = mkOption {
         default = "hound";
         type = types.str;
@@ -29,27 +37,15 @@ in {
         '';
       };
 
-      extraGroups = mkOption {
-        type = types.listOf types.str;
-        default = [ ];
-        example = [ "dialout" ];
-        description = lib.mdDoc ''
-          List of extra groups that the "hound" user should be a part of.
-        '';
-      };
-
       home = mkOption {
         default = "/var/lib/hound";
         type = types.path;
         description = lib.mdDoc ''
-          The path to use as hound's $HOME. If the default user
-          "hound" is configured then this is the home of the "hound"
-          user.
+          The path to use as hound's $HOME.
+          If the default user "hound" is configured then this is the home of the "hound" user.
         '';
       };
 
-      package = mkPackageOption pkgs "hound" { };
-
       config = mkOption {
         type = types.str;
         description = lib.mdDoc ''
@@ -57,63 +53,62 @@ in {
           should be an absolute path to a writable location on disk.
         '';
         example = literalExpression ''
-          '''
-            {
-              "max-concurrent-indexers" : 2,
-              "dbpath" : "''${services.hound.home}/data",
-              "repos" : {
-                  "nixpkgs": {
-                    "url" : "https://www.github.com/NixOS/nixpkgs.git"
-                  }
-              }
+          {
+            "max-concurrent-indexers" : 2,
+            "repos" : {
+                "nixpkgs": {
+                  "url" : "https://www.github.com/NixOS/nixpkgs.git"
+                }
             }
-          '''
+          }
         '';
       };
 
       listen = mkOption {
         type = types.str;
         default = "0.0.0.0:6080";
-        example = "127.0.0.1:6080 or just :6080";
+        example = ":6080";
         description = lib.mdDoc ''
-          Listen on this IP:port / :port
+          Listen on this [IP]:port
         '';
       };
     };
   };
 
   config = mkIf cfg.enable {
-    users.groups = optionalAttrs (cfg.group == "hound") {
-      hound.gid = config.ids.gids.hound;
+    users.groups = lib.mkIf (cfg.group == "hound") {
+      hound = { };
     };
 
-    users.users = optionalAttrs (cfg.user == "hound") {
+    users.users = lib.mkIf (cfg.user == "hound") {
       hound = {
-        description = "hound code search";
+        description = "Hound code search";
         createHome = true;
-        home = cfg.home;
-        group = cfg.group;
-        extraGroups = cfg.extraGroups;
-        uid = config.ids.uids.hound;
+        isSystemUser = true;
+        inherit (cfg) home group;
       };
     };
 
-    systemd.services.hound = {
+    systemd.services.hound = let
+      configFile = pkgs.writeTextFile {
+        name = "hound.json";
+        text = cfg.config;
+        checkPhase = ''
+          # check if the supplied text is valid json
+          ${lib.getExe pkgs.jq} . $target > /dev/null
+        '';
+      };
+    in {
       description = "Hound Code Search";
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
-
       serviceConfig = {
         User = cfg.user;
         Group = cfg.group;
         WorkingDirectory = cfg.home;
         ExecStartPre = "${pkgs.git}/bin/git config --global --replace-all http.sslCAinfo /etc/ssl/certs/ca-certificates.crt";
-        ExecStart = "${cfg.package}/bin/houndd" +
-                    " -addr ${cfg.listen}" +
-                    " -conf ${pkgs.writeText "hound.json" cfg.config}";
-
+        ExecStart = "${cfg.package}/bin/houndd -addr ${cfg.listen} -conf ${configFile}";
       };
     };
   };
-
 }
diff --git a/nixos/modules/services/security/clamav.nix b/nixos/modules/services/security/clamav.nix
index 72a195d3a04ed..d3164373ec01f 100644
--- a/nixos/modules/services/security/clamav.nix
+++ b/nixos/modules/services/security/clamav.nix
@@ -3,7 +3,6 @@ with lib;
 let
   clamavUser = "clamav";
   stateDir = "/var/lib/clamav";
-  runDir = "/run/clamav";
   clamavGroup = clamavUser;
   cfg = config.services.clamav;
   pkg = pkgs.clamav;
@@ -99,6 +98,29 @@ in
           '';
         };
       };
+
+      scanner = {
+        enable = mkEnableOption (lib.mdDoc "ClamAV scanner");
+
+        interval = mkOption {
+          type = types.str;
+          default = "*-*-* 04:00:00";
+          description = lib.mdDoc ''
+            How often clamdscan is invoked. See systemd.time(7) for more
+            information about the format.
+            By default this runs with at most 10 cores; be sure to schedule it at a time of low traffic.
+          '';
+        };
+
+        scanDirectories = mkOption {
+          type = with types; listOf str;
+          default = [ "/home" "/var/lib" "/tmp" "/etc" "/var/tmp" ];
+          description = lib.mdDoc ''
+            List of directories to scan.
+            The default covers the directories that are typically relevant on NixOS. Feel free to contribute a PR to extend the default if something is missing.
+          '';
+        };
+      };
     };
   };
 
@@ -117,9 +139,8 @@ in
 
     services.clamav.daemon.settings = {
       DatabaseDirectory = stateDir;
-      LocalSocket = "${runDir}/clamd.ctl";
-      PidFile = "${runDir}/clamd.pid";
-      TemporaryDirectory = "/tmp";
+      LocalSocket = "/run/clamav/clamd.ctl";
+      PidFile = "/run/clamav/clamd.pid";
       User = "clamav";
       Foreground = true;
     };
@@ -182,7 +203,6 @@ in
         ExecStart = "${pkg}/bin/freshclam";
         SuccessExitStatus = "1"; # if databases are up to date
         StateDirectory = "clamav";
-        RuntimeDirectory = "clamav";
         User = clamavUser;
         Group = clamavGroup;
         PrivateTmp = "yes";
@@ -204,7 +224,6 @@ in
       serviceConfig = {
         Type = "oneshot";
         StateDirectory = "clamav";
-        RuntimeDirectory = "clamav";
         User = clamavUser;
         Group = clamavGroup;
         PrivateTmp = "yes";
@@ -230,12 +249,31 @@ in
         Type = "oneshot";
         ExecStart = "${pkgs.fangfrisch}/bin/fangfrisch --conf ${fangfrischConfigFile} refresh";
         StateDirectory = "clamav";
-        RuntimeDirectory = "clamav";
         User = clamavUser;
         Group = clamavGroup;
         PrivateTmp = "yes";
         PrivateDevices = "yes";
       };
     };
+
+    systemd.timers.clamdscan = mkIf cfg.scanner.enable {
+      description = "Timer for ClamAV virus scanner";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = cfg.scanner.interval;
+        Unit = "clamdscan.service";
+      };
+    };
+
+    systemd.services.clamdscan = mkIf cfg.scanner.enable {
+      description = "ClamAV virus scanner";
+      after = optionals cfg.updater.enable [ "clamav-freshclam.service" ];
+      wants = optionals cfg.updater.enable [ "clamav-freshclam.service" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkg}/bin/clamdscan --multiscan --fdpass --infected --allmatch ${lib.concatStringsSep " " cfg.scanner.scanDirectories}";
+      };
+    };
   };
 }
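A sketch of enabling the new scanner alongside the daemon and updater (`daemon.enable` and `updater.enable` are assumed to exist elsewhere in this module; only the `scanner` options are introduced in this hunk):

    services.clamav = {
      daemon.enable = true;    # clamdscan needs a running clamd to hand files to
      updater.enable = true;   # clamdscan is ordered after clamav-freshclam when this is set
      scanner = {
        enable = true;
        interval = "*-*-* 04:00:00";
        scanDirectories = [ "/home" "/var/lib" ];
      };
    };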
diff --git a/nixos/modules/services/torrent/transmission.nix b/nixos/modules/services/torrent/transmission.nix
index 0cd24fb03a7bd..88537f8c4f7b6 100644
--- a/nixos/modules/services/torrent/transmission.nix
+++ b/nixos/modules/services/torrent/transmission.nix
@@ -333,10 +333,10 @@ in
             cfg.settings.watch-dir;
         StateDirectory = [
           "transmission"
-          "transmission/.config/transmission-daemon"
-          "transmission/.incomplete"
-          "transmission/Downloads"
-          "transmission/watch-dir"
+          "transmission/${settingsDir}"
+          "transmission/${incompleteDir}"
+          "transmission/${downloadsDir}"
+          "transmission/${watchDir}"
         ];
         StateDirectoryMode = mkDefault 750;
         # The following options are only for optimizing:
diff --git a/nixos/modules/services/web-apps/jitsi-meet.nix b/nixos/modules/services/web-apps/jitsi-meet.nix
index 0c0eb66e65b7c..c4505534d635e 100644
--- a/nixos/modules/services/web-apps/jitsi-meet.nix
+++ b/nixos/modules/services/web-apps/jitsi-meet.nix
@@ -35,6 +35,7 @@ let
       domain = cfg.hostName;
       muc = "conference.${cfg.hostName}";
       focus = "focus.${cfg.hostName}";
+      jigasi = "jigasi.${cfg.hostName}";
     };
     bosh = "//${cfg.hostName}/http-bind";
     websocket = "wss://${cfg.hostName}/xmpp-websocket";
@@ -145,6 +146,16 @@ in
       '';
     };
 
+    jigasi.enable = mkOption {
+      type = bool;
+      default = false;
+      description = ''
+        Whether to enable a jigasi instance and configure it to connect to Prosody.
+
+        Additional configuration is possible with <option>services.jigasi</option>.
+      '';
+    };
+
     nginx.enable = mkOption {
       type = bool;
       default = true;
@@ -224,7 +235,7 @@ in
           roomDefaultPublicJids = true;
           extraConfig = ''
             storage = "memory"
-            admins = { "focus@auth.${cfg.hostName}", "jvb@auth.${cfg.hostName}" }
+            admins = { "focus@auth.${cfg.hostName}", "jvb@auth.${cfg.hostName}", "jigasi@auth.${cfg.hostName}" }
           '';
           #-- muc_room_cache_size = 1000
         }
@@ -263,6 +274,9 @@ in
           Component "focus.${cfg.hostName}" "client_proxy"
             target_address = "focus@auth.${cfg.hostName}"
 
+          Component "jigasi.${cfg.hostName}" "client_proxy"
+            target_address = "jigasi@auth.${cfg.hostName}"
+
           Component "speakerstats.${cfg.hostName}" "speakerstats_component"
             muc_component = "conference.${cfg.hostName}"
 
@@ -356,7 +370,10 @@ in
         ${config.services.prosody.package}/bin/prosodyctl mod_roster_command subscribe focus.${cfg.hostName} focus@auth.${cfg.hostName}
         ${config.services.prosody.package}/bin/prosodyctl register jibri auth.${cfg.hostName} "$(cat /var/lib/jitsi-meet/jibri-auth-secret)"
         ${config.services.prosody.package}/bin/prosodyctl register recorder recorder.${cfg.hostName} "$(cat /var/lib/jitsi-meet/jibri-recorder-secret)"
+      '' + optionalString cfg.jigasi.enable ''
+        ${config.services.prosody.package}/bin/prosodyctl register jigasi auth.${cfg.hostName} "$(cat /var/lib/jitsi-meet/jigasi-user-secret)"
       '';
+
       serviceConfig = {
         EnvironmentFile = [ "/var/lib/jitsi-meet/secrets-env" ];
         SupplementaryGroups = [ "jitsi-meet" ];
@@ -371,13 +388,13 @@ in
 
     systemd.services.jitsi-meet-init-secrets = {
       wantedBy = [ "multi-user.target" ];
-      before = [ "jicofo.service" "jitsi-videobridge2.service" ] ++ (optional cfg.prosody.enable "prosody.service");
+      before = [ "jicofo.service" "jitsi-videobridge2.service" ] ++ (optional cfg.prosody.enable "prosody.service") ++ (optional cfg.jigasi.enable "jigasi.service");
       serviceConfig = {
         Type = "oneshot";
       };
 
       script = let
-        secrets = [ "jicofo-component-secret" "jicofo-user-secret" "jibri-auth-secret" "jibri-recorder-secret" ] ++ (optional (cfg.videobridge.passwordFile == null) "videobridge-secret");
+        secrets = [ "jicofo-component-secret" "jicofo-user-secret" "jibri-auth-secret" "jibri-recorder-secret" ] ++ (optionals cfg.jigasi.enable [ "jigasi-user-secret" "jigasi-component-secret" ]) ++ (optional (cfg.videobridge.passwordFile == null) "videobridge-secret");
       in
       ''
         cd /var/lib/jitsi-meet
@@ -391,6 +408,7 @@ in
 
         # for easy access in prosody
         echo "JICOFO_COMPONENT_SECRET=$(cat jicofo-component-secret)" > secrets-env
+        echo "JIGASI_COMPONENT_SECRET=$(cat jigasi-component-secret)" >> secrets-env
         chown root:jitsi-meet secrets-env
         chmod 640 secrets-env
       ''
@@ -592,6 +610,20 @@ in
         stripFromRoomDomain = "conference.";
       };
     };
+
+    services.jigasi = mkIf cfg.jigasi.enable {
+      enable = true;
+      xmppHost = "localhost";
+      xmppDomain = cfg.hostName;
+      userDomain = "auth.${cfg.hostName}";
+      userName = "jigasi";
+      userPasswordFile = "/var/lib/jitsi-meet/jigasi-user-secret";
+      componentPasswordFile = "/var/lib/jitsi-meet/jigasi-component-secret";
+      bridgeMuc = "jigasibrewery@internal.${cfg.hostName}";
+      config = {
+        "org.jitsi.jigasi.ALWAYS_TRUST_MODE_ENABLED" = "true";
+      };
+    };
   };
 
   meta.doc = ./jitsi-meet.md;
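A sketch of turning the new integration on from the jitsi-meet side (`enable` and `hostName` are assumed from the wider module; only `jigasi.enable` is added in this hunk):

    services.jitsi-meet = {
      enable = true;
      hostName = "meet.example.org";
      jigasi.enable = true;   # provisions the jigasi user/component secrets and Prosody accounts shown above
    };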
diff --git a/nixos/modules/services/web-apps/keycloak.nix b/nixos/modules/services/web-apps/keycloak.nix
index 5d44bdee64a73..6d2948913b194 100644
--- a/nixos/modules/services/web-apps/keycloak.nix
+++ b/nixos/modules/services/web-apps/keycloak.nix
@@ -25,7 +25,6 @@ let
     maintainers
     catAttrs
     collect
-    splitString
     hasPrefix
     ;
 
@@ -329,7 +328,8 @@ in
             };
 
             hostname = mkOption {
-              type = str;
+              type = nullOr str;
+              default = null;
               example = "keycloak.example.com";
               description = lib.mdDoc ''
                 The hostname part of the public URL used as base for
@@ -451,7 +451,7 @@ in
 
       keycloakConfig = lib.generators.toKeyValue {
         mkKeyValue = lib.flip lib.generators.mkKeyValueDefault "=" {
-          mkValueString = v: with builtins;
+          mkValueString = v:
             if isInt v then toString v
             else if isString v then v
             else if true == v then "true"
@@ -480,6 +480,14 @@ in
             assertion = createLocalPostgreSQL -> config.services.postgresql.settings.standard_conforming_strings or true;
             message = "Setting up a local PostgreSQL db for Keycloak requires `standard_conforming_strings` turned on to work reliably";
           }
+          {
+            assertion = cfg.settings.hostname != null || cfg.settings.hostname-url or null != null;
+            message = "Setting the Keycloak hostname is required, see `services.keycloak.settings.hostname`";
+          }
+          {
+            assertion = !(cfg.settings.hostname != null && cfg.settings.hostname-url or null != null);
+            message = "`services.keycloak.settings.hostname` and `services.keycloak.settings.hostname-url` are mutually exclusive";
+          }
         ];
 
         environment.systemPackages = [ keycloakBuild ];
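A sketch of satisfying the new assertions; exactly one of the two settings should be present (`hostname-url` is only referenced here, its exact semantics are Keycloak's own):

    services.keycloak.settings = {
      hostname = "keycloak.example.com";
      # or, mutually exclusive with hostname:
      # hostname-url = "https://keycloak.example.com";
    };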
diff --git a/nixos/modules/services/web-apps/mastodon.nix b/nixos/modules/services/web-apps/mastodon.nix
index 8686506b1c282..7b00ce35eb1a7 100644
--- a/nixos/modules/services/web-apps/mastodon.nix
+++ b/nixos/modules/services/web-apps/mastodon.nix
@@ -229,7 +229,7 @@ in {
       streamingProcesses = lib.mkOption {
         description = lib.mdDoc ''
           Number of processes used by the mastodon-streaming service.
-          Recommended is the amount of your CPU cores minus one.
+          Please define this explicitly; the recommended value is the number of your CPU cores minus one.
         '';
         type = lib.types.ints.positive;
         example = 3;
diff --git a/nixos/modules/services/web-apps/mattermost.nix b/nixos/modules/services/web-apps/mattermost.nix
index f19465eeb59a5..5035594323749 100644
--- a/nixos/modules/services/web-apps/mattermost.nix
+++ b/nixos/modules/services/web-apps/mattermost.nix
@@ -102,7 +102,7 @@ in
     services.mattermost = {
       enable = mkEnableOption (lib.mdDoc "Mattermost chat server");
 
-      package = mkPackageOption pkgs "mattermostl" { };
+      package = mkPackageOption pkgs "mattermost" { };
 
       statePath = mkOption {
         type = types.str;
diff --git a/nixos/modules/services/web-apps/mobilizon.nix b/nixos/modules/services/web-apps/mobilizon.nix
index 0a530bff92325..bdb08f6131496 100644
--- a/nixos/modules/services/web-apps/mobilizon.nix
+++ b/nixos/modules/services/web-apps/mobilizon.nix
@@ -384,7 +384,7 @@ in
           ensureDBOwnership = false;
         }
       ];
-      extraPlugins = with postgresql.pkgs; [ postgis ];
+      extraPlugins = ps: with ps; [ postgis ];
     };
 
     # Nginx config taken from support/nginx/mobilizon-release.conf
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix
index 6c50ea3c81ef7..db6eacf30196b 100644
--- a/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixos/modules/services/web-apps/nextcloud.nix
@@ -61,7 +61,9 @@ let
   pgsqlLocal = cfg.database.createLocally && cfg.config.dbtype == "pgsql";
 
   # https://github.com/nextcloud/documentation/pull/11179
-  ocmProviderIsNotAStaticDirAnymore = versionAtLeast cfg.package.version "27.1.2";
+  ocmProviderIsNotAStaticDirAnymore = versionAtLeast cfg.package.version "27.1.2"
+    || (versionOlder cfg.package.version "27.0.0"
+      && versionAtLeast cfg.package.version "26.0.8");
 
 in {
 
@@ -238,7 +240,7 @@ in {
     };
 
     phpOptions = mkOption {
-      type = types.attrsOf types.str;
+      type = with types; attrsOf (oneOf [ str int ]);
       defaultText = literalExpression (generators.toPretty { } defaultPHPSettings);
       description = lib.mdDoc ''
         Options for PHP's php.ini file for nextcloud.
diff --git a/nixos/modules/services/web-apps/node-red.nix b/nixos/modules/services/web-apps/node-red.nix
index d775042fed164..de78f05a98ca2 100644
--- a/nixos/modules/services/web-apps/node-red.nix
+++ b/nixos/modules/services/web-apps/node-red.nix
@@ -19,7 +19,7 @@ in
   options.services.node-red = {
     enable = mkEnableOption (lib.mdDoc "the Node-RED service");
 
-    package = mkPackageOption pkgs "node-red" { };
+    package = mkPackageOption pkgs "nodePackages.node-red" { };
 
     openFirewall = mkOption {
       type = types.bool;
diff --git a/nixos/modules/services/web-apps/windmill.nix b/nixos/modules/services/web-apps/windmill.nix
new file mode 100644
index 0000000000000..8e940dabdc1f8
--- /dev/null
+++ b/nixos/modules/services/web-apps/windmill.nix
@@ -0,0 +1,177 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.windmill;
+in
+{
+  options.services.windmill = {
+    enable = lib.mkEnableOption (lib.mdDoc "windmill service");
+
+    serverPort = lib.mkOption {
+      type = lib.types.port;
+      default = 8001;
+      description = lib.mdDoc "Port the windmill server listens on.";
+    };
+
+    lspPort = lib.mkOption {
+      type = lib.types.port;
+      default = 3001;
+      description = lib.mdDoc "Port the windmill LSP server listens on.";
+    };
+
+    database = {
+      name = lib.mkOption {
+        type = lib.types.str;
+        # the simplest database setup is to name the database after its user.
+        default = "windmill";
+        description = lib.mdDoc "Database name.";
+      };
+
+      user = lib.mkOption {
+        type = lib.types.str;
+        # the simplest database setup is to name the database user after the database.
+        default = "windmill";
+        description = lib.mdDoc "Database user.";
+      };
+
+      urlPath = lib.mkOption {
+        type = lib.types.path;
+        description = lib.mdDoc ''
+          Path to the file containing the database URL that windmill should connect to. It is not deduced from the database user and name, as it might contain a secret.
+        '';
+        example = "config.age.secrets.DATABASE_URL_FILE.path";
+      };
+      createLocally = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to create a local database automatically.";
+      };
+    };
+
+    baseUrl = lib.mkOption {
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        The base URL that windmill will be served on.
+      '';
+      example = "https://windmill.example.com";
+    };
+
+    logLevel = lib.mkOption {
+      type = lib.types.enum [ "error" "warn" "info" "debug" "trace" ];
+      default = "info";
+      description = lib.mdDoc "Log level";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    services.postgresql = lib.optionalAttrs (cfg.database.createLocally) {
+      enable = lib.mkDefault true;
+
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensureDBOwnership = true;
+        }
+      ];
+
+   };
+
+   systemd.services =
+    let
+      serviceConfig = {
+        DynamicUser = true;
+        # using the same user to simplify db connection
+        User = cfg.database.user;
+        ExecStart = "${pkgs.windmill}/bin/windmill";
+
+        Restart = "always";
+        LoadCredential = [
+          "DATABASE_URL_FILE:${cfg.database.urlPath}"
+        ];
+      };
+    in
+    {
+
+    # coming from https://github.com/windmill-labs/windmill/blob/main/init-db-as-superuser.sql
+    # modified to not grant privileges on all tables
+    # create role windmill_user and windmill_admin only if they don't exist
+    postgresql.postStart = lib.mkIf cfg.database.createLocally (lib.mkAfter ''
+      $PSQL -tA <<"EOF"
+DO $$
+BEGIN
+    IF NOT EXISTS (
+        SELECT FROM pg_catalog.pg_roles
+        WHERE rolname = 'windmill_user'
+    ) THEN
+        CREATE ROLE windmill_user;
+        GRANT ALL PRIVILEGES ON DATABASE ${cfg.database.name} TO windmill_user;
+    ELSE
+      RAISE NOTICE 'Role "windmill_user" already exists. Skipping.';
+    END IF;
+    IF NOT EXISTS (
+        SELECT FROM pg_catalog.pg_roles
+        WHERE rolname = 'windmill_admin'
+    ) THEN
+      CREATE ROLE windmill_admin WITH BYPASSRLS;
+      GRANT windmill_user TO windmill_admin;
+    ELSE
+      RAISE NOTICE 'Role "windmill_admin" already exists. Skipping.';
+    END IF;
+    GRANT windmill_admin TO windmill;
+END
+$$;
+EOF
+    '');
+
+     windmill-server = {
+        description = "Windmill server";
+        after = [ "network.target" ] ++ lib.optional cfg.database.createLocally "postgresql.service";
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = serviceConfig // { StateDirectory = "windmill";};
+
+        environment = {
+          DATABASE_URL_FILE = "%d/DATABASE_URL_FILE";
+          PORT = builtins.toString cfg.serverPort;
+          WM_BASE_URL = cfg.baseUrl;
+          RUST_LOG = cfg.logLevel;
+          MODE = "server";
+        };
+      };
+
+     windmill-worker = {
+        description = "Windmill worker";
+        after = [ "network.target" ] ++ lib.optional cfg.database.createLocally "postgresql.service";
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = serviceConfig // { StateDirectory = "windmill-worker";};
+
+        environment = {
+          DATABASE_URL_FILE = "%d/DATABASE_URL_FILE";
+          WM_BASE_URL = cfg.baseUrl;
+          RUST_LOG = cfg.logLevel;
+          MODE = "worker";
+          WORKER_GROUP = "default";
+          KEEP_JOB_DIR = "false";
+        };
+      };
+
+     windmill-worker-native = {
+        description = "Windmill worker native";
+        after = [ "network.target" ] ++ lib.optional cfg.database.createLocally "postgresql.service";
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = serviceConfig // { StateDirectory = "windmill-worker-native";};
+
+        environment = {
+          DATABASE_URL_FILE = "%d/DATABASE_URL_FILE";
+          WM_BASE_URL = cfg.baseUrl;
+          RUST_LOG = cfg.logLevel;
+          MODE = "worker";
+          WORKER_GROUP = "native";
+        };
+      };
+    };
+  };
+}
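A minimal sketch using only the options declared in this module (the URL file path is a placeholder for whatever secret management is in use):

    services.windmill = {
      enable = true;
      baseUrl = "https://windmill.example.com";
      database.urlPath = "/run/keys/windmill-db-url";   # file containing the PostgreSQL connection URL
      logLevel = "info";
    };

With `database.createLocally` left at its default of true, the module also sets up the local PostgreSQL database and the windmill_user/windmill_admin roles shown in the postStart script.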
diff --git a/nixos/modules/services/web-apps/wordpress.nix b/nixos/modules/services/web-apps/wordpress.nix
index 03d5634854a37..002d6683b2ed5 100644
--- a/nixos/modules/services/web-apps/wordpress.nix
+++ b/nixos/modules/services/web-apps/wordpress.nix
@@ -34,7 +34,7 @@ let
       # copy additional plugin(s), theme(s) and language(s)
       ${concatStringsSep "\n" (mapAttrsToList (name: theme: "cp -r ${theme} $out/share/wordpress/wp-content/themes/${name}") cfg.themes)}
       ${concatStringsSep "\n" (mapAttrsToList (name: plugin: "cp -r ${plugin} $out/share/wordpress/wp-content/plugins/${name}") cfg.plugins)}
-      ${concatMapStringsSep "\n" (language: "cp -r ${language}/* $out/share/wordpress/wp-content/languages/") cfg.languages}
+      ${concatMapStringsSep "\n" (language: "cp -r ${language} $out/share/wordpress/wp-content/languages/") cfg.languages}
     '';
   };
 
diff --git a/nixos/modules/services/web-servers/caddy/default.nix b/nixos/modules/services/web-servers/caddy/default.nix
index 497aa9ba956e0..9a544e98cfc44 100644
--- a/nixos/modules/services/web-servers/caddy/default.nix
+++ b/nixos/modules/services/web-servers/caddy/default.nix
@@ -147,7 +147,7 @@ in
       default = configFile;
       defaultText = "A Caddyfile automatically generated by values from services.caddy.*";
       example = literalExpression ''
-        pkgs.writeTextDir "Caddyfile" '''
+        pkgs.writeText "Caddyfile" '''
           example.com
 
           root * /var/www/wordpress
@@ -164,9 +164,9 @@ in
     };
 
     adapter = mkOption {
-      default = if (builtins.baseNameOf cfg.configFile) == "Caddyfile" then "caddyfile" else null;
+      default = if ((cfg.configFile != configFile) || (builtins.baseNameOf cfg.configFile) == "Caddyfile") then "caddyfile" else null;
       defaultText = literalExpression ''
-        if (builtins.baseNameOf cfg.configFile) == "Caddyfile" then "caddyfile" else null
+        if ((cfg.configFile != configFile) || (builtins.baseNameOf cfg.configFile) == "Caddyfile") then "caddyfile" else null
       '';
       example = literalExpression "nginx";
       type = with types; nullOr str;
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index 848d12b17f878..6ea24e65f2206 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -449,7 +449,7 @@ let
       ${optionalString (config.tryFiles != null) "try_files ${config.tryFiles};"}
       ${optionalString (config.root != null) "root ${config.root};"}
       ${optionalString (config.alias != null) "alias ${config.alias};"}
-      ${optionalString (config.return != null) "return ${config.return};"}
+      ${optionalString (config.return != null) "return ${toString config.return};"}
       ${config.extraConfig}
       ${optionalString (config.proxyPass != null && config.recommendedProxySettings) "include ${recommendedProxyConfig};"}
       ${mkBasicAuth "sublocation" config}
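With the type change in location-options.nix below, `return` now also accepts a bare status code as an integer; a sketch (virtual host and location names are placeholders):

    services.nginx.virtualHosts."example.com".locations = {
      "/old".return = "301 https://example.com/new";   # strings keep working
      "/gone".return = 410;                            # integers are now rendered via toString
    };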
diff --git a/nixos/modules/services/web-servers/nginx/location-options.nix b/nixos/modules/services/web-servers/nginx/location-options.nix
index 2728852058ea7..2138e551fd434 100644
--- a/nixos/modules/services/web-servers/nginx/location-options.nix
+++ b/nixos/modules/services/web-servers/nginx/location-options.nix
@@ -93,7 +93,7 @@ with lib;
     };
 
     return = mkOption {
-      type = types.nullOr types.str;
+      type = with types; nullOr (oneOf [ str int ]);
       default = null;
       example = "301 http://example.com$request_uri";
       description = lib.mdDoc ''
diff --git a/nixos/modules/services/web-servers/nginx/tailscale-auth.nix b/nixos/modules/services/web-servers/nginx/tailscale-auth.nix
new file mode 100644
index 0000000000000..a2e4d4a30be5c
--- /dev/null
+++ b/nixos/modules/services/web-servers/nginx/tailscale-auth.nix
@@ -0,0 +1,158 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nginx.tailscaleAuth;
+in
+{
+  options.services.nginx.tailscaleAuth = {
+    enable = mkEnableOption (lib.mdDoc "tailscale.nginx-auth, to authenticate nginx users via tailscale");
+
+    package = lib.mkPackageOptionMD pkgs "tailscale-nginx-auth" {};
+
+    user = mkOption {
+      type = types.str;
+      default = "tailscale-nginx-auth";
+      description = lib.mdDoc "User which runs tailscale-nginx-auth";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "tailscale-nginx-auth";
+      description = lib.mdDoc "Group which runs tailscale-nginx-auth";
+    };
+
+    expectedTailnet = mkOption {
+      default = "";
+      type = types.nullOr types.str;
+      example = "tailnet012345.ts.net";
+      description = lib.mdDoc ''
+        If you want to prevent node sharing from allowing users to access services
+        across tailnets, declare your expected tailnet's domain here.
+      '';
+    };
+
+    socketPath = mkOption {
+      default = "/run/tailscale-nginx-auth/tailscale-nginx-auth.sock";
+      type = types.path;
+      description = lib.mdDoc ''
+        Path of the socket listening to nginx authorization requests.
+      '';
+    };
+
+    virtualHosts = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        A list of nginx virtual hosts to put behind tailscale.nginx-auth.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.tailscale.enable = true;
+    services.nginx.enable = true;
+
+    users.users.${cfg.user} = {
+      isSystemUser = true;
+      inherit (cfg) group;
+    };
+    users.groups.${cfg.group} = { };
+    users.users.${config.services.nginx.user}.extraGroups = [ cfg.group ];
+    systemd.sockets.tailscale-nginx-auth = {
+      description = "Tailscale NGINX Authentication socket";
+      partOf = [ "tailscale-nginx-auth.service" ];
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ cfg.socketPath ];
+      socketConfig = {
+        SocketMode = "0660";
+        SocketUser = cfg.user;
+        SocketGroup = cfg.group;
+      };
+    };
+
+
+    systemd.services.tailscale-nginx-auth = {
+      description = "Tailscale NGINX Authentication service";
+      after = [ "nginx.service" ];
+      wants = [ "nginx.service" ];
+      requires = [ "tailscale-nginx-auth.socket" ];
+
+      serviceConfig = {
+        ExecStart = "${lib.getExe cfg.package}";
+        RuntimeDirectory = "tailscale-nginx-auth";
+        User = cfg.user;
+        Group = cfg.group;
+
+        BindPaths = [ "/run/tailscale/tailscaled.sock" ];
+
+        CapabilityBoundingSet = "";
+        DeviceAllow = "";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictNamespaces = true;
+        RestrictAddressFamilies = [ "AF_UNIX" ];
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = [
+          "@system-service"
+          "~@cpu-emulation" "~@debug" "~@keyring" "~@memlock" "~@obsolete" "~@privileged" "~@setuid"
+        ];
+      };
+    };
+
+    services.nginx.virtualHosts = genAttrs
+      cfg.virtualHosts
+      (vhost: {
+        locations."/auth" = {
+          extraConfig = ''
+            internal;
+
+            proxy_pass http://unix:${cfg.socketPath};
+            proxy_pass_request_body off;
+
+            # Upstream uses $http_host here, but we are using gixy to check nginx configurations
+            # gixy wants us to use $host: https://github.com/yandex/gixy/blob/master/docs/en/plugins/hostspoofing.md
+            proxy_set_header Host $host;
+            proxy_set_header Remote-Addr $remote_addr;
+            proxy_set_header Remote-Port $remote_port;
+            proxy_set_header Original-URI $request_uri;
+            proxy_set_header X-Scheme                $scheme;
+            proxy_set_header X-Auth-Request-Redirect $scheme://$host$request_uri;
+          '';
+        };
+        locations."/".extraConfig = ''
+          auth_request /auth;
+          auth_request_set $auth_user $upstream_http_tailscale_user;
+          auth_request_set $auth_name $upstream_http_tailscale_name;
+          auth_request_set $auth_login $upstream_http_tailscale_login;
+          auth_request_set $auth_tailnet $upstream_http_tailscale_tailnet;
+          auth_request_set $auth_profile_picture $upstream_http_tailscale_profile_picture;
+
+          proxy_set_header X-Webauth-User "$auth_user";
+          proxy_set_header X-Webauth-Name "$auth_name";
+          proxy_set_header X-Webauth-Login "$auth_login";
+          proxy_set_header X-Webauth-Tailnet "$auth_tailnet";
+          proxy_set_header X-Webauth-Profile-Picture "$auth_profile_picture";
+
+          ${lib.optionalString (cfg.expectedTailnet != "") ''proxy_set_header Expected-Tailnet "${cfg.expectedTailnet}";''}
+        '';
+      });
+  };
+
+  meta.maintainers = with maintainers; [ phaer ];
+
+}
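A sketch of protecting an existing virtual host with the new module (host name and tailnet are placeholders; the upstream service to proxy to is configured on the same virtual host as usual):

    services.nginx.tailscaleAuth = {
      enable = true;
      expectedTailnet = "tailnet012345.ts.net";
      virtualHosts = [ "monitoring.example.com" ];
    };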
diff --git a/nixos/modules/services/x11/desktop-managers/plasma5.nix b/nixos/modules/services/x11/desktop-managers/plasma5.nix
index 9cc7c4381620f..027479b1ce094 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma5.nix
+++ b/nixos/modules/services/x11/desktop-managers/plasma5.nix
@@ -338,6 +338,7 @@ in
 
       # Enable helpful DBus services.
       services.accounts-daemon.enable = true;
+      programs.dconf.enable = true;
       # when changing an account picture the accounts-daemon reads a temporary file containing the image which systemsettings5 may place under /tmp
       systemd.services.accounts-daemon.serviceConfig.PrivateTmp = false;
       services.power-profiles-daemon.enable = mkDefault true;
diff --git a/nixos/modules/system/boot/networkd.nix b/nixos/modules/system/boot/networkd.nix
index 33261021480f1..3e10770812db6 100644
--- a/nixos/modules/system/boot/networkd.nix
+++ b/nixos/modules/system/boot/networkd.nix
@@ -1612,7 +1612,7 @@ let
         description = lib.mdDoc ''
           Each attribute in this set specifies an option in the
           `[WireGuardPeer]` section of the unit.  See
-          {manpage}`systemd.network(5)` for details.
+          {manpage}`systemd.netdev(5)` for details.
         '';
       };
     };
diff --git a/nixos/modules/tasks/filesystems/zfs.nix b/nixos/modules/tasks/filesystems/zfs.nix
index fd92a0014002c..784040f0ce9e3 100644
--- a/nixos/modules/tasks/filesystems/zfs.nix
+++ b/nixos/modules/tasks/filesystems/zfs.nix
@@ -17,7 +17,7 @@ let
   cfgZED = config.services.zfs.zed;
 
   selectModulePackage = package: config.boot.kernelPackages.${package.kernelModuleAttribute};
-  clevisDatasets = map (e: e.device) (filter (e: (hasAttr e.device config.boot.initrd.clevis.devices) && e.fsType == "zfs" && (fsNeededForBoot e)) config.system.build.fileSystems);
+  clevisDatasets = map (e: e.device) (filter (e: e.device != null && (hasAttr e.device config.boot.initrd.clevis.devices) && e.fsType == "zfs" && (fsNeededForBoot e)) config.system.build.fileSystems);
 
 
   inInitrd = any (fs: fs == "zfs") config.boot.initrd.supportedFilesystems;
@@ -157,7 +157,7 @@ let
           poolImported "${pool}" || poolImport "${pool}"  # Try one last time, e.g. to import a degraded pool.
         fi
         if poolImported "${pool}"; then
-        ${concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem} || true ") (filter (p: (elemAt (splitString "/" p) 0) == pool) clevisDatasets)}
+        ${optionalString config.boot.initrd.clevis.enable (concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem} || true ") (filter (p: (elemAt (splitString "/" p) 0) == pool) clevisDatasets))}
 
 
           ${optionalString keyLocations.hasKeys ''
@@ -630,7 +630,7 @@ in
               poolImported "${pool}" || poolImport "${pool}"  # Try one last time, e.g. to import a degraded pool.
             fi
 
-            ${concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem}") (filter (p: (elemAt (splitString "/" p) 0) == pool) clevisDatasets)}
+            ${optionalString config.boot.initrd.clevis.enable (concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem}") (filter (p: (elemAt (splitString "/" p) 0) == pool) clevisDatasets))}
 
             ${if isBool cfgZfs.requestEncryptionCredentials
               then optionalString cfgZfs.requestEncryptionCredentials ''
diff --git a/nixos/modules/tasks/network-interfaces.nix b/nixos/modules/tasks/network-interfaces.nix
index 298add13437a0..53ffaa028038d 100644
--- a/nixos/modules/tasks/network-interfaces.nix
+++ b/nixos/modules/tasks/network-interfaces.nix
@@ -1396,6 +1396,8 @@ in
       "net.ipv4.conf.all.forwarding" = mkDefault (any (i: i.proxyARP) interfaces);
       "net.ipv6.conf.all.disable_ipv6" = mkDefault (!cfg.enableIPv6);
       "net.ipv6.conf.default.disable_ipv6" = mkDefault (!cfg.enableIPv6);
+      # allow all users to do ICMP echo requests (ping)
+      "net.ipv4.ping_group_range" = mkDefault "0 2147483647";
       # networkmanager falls back to "/proc/sys/net/ipv6/conf/default/use_tempaddr"
       "net.ipv6.conf.default.use_tempaddr" = tempaddrValues.${cfg.tempAddresses}.sysctl;
     } // listToAttrs (forEach interfaces
diff --git a/nixos/modules/virtualisation/incus.nix b/nixos/modules/virtualisation/incus.nix
index 47a5e462262d4..3e48f8873ed4f 100644
--- a/nixos/modules/virtualisation/incus.nix
+++ b/nixos/modules/virtualisation/incus.nix
@@ -5,7 +5,9 @@ let
   preseedFormat = pkgs.formats.yaml { };
 in
 {
-  meta.maintainers = [ lib.maintainers.adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   options = {
     virtualisation.incus = {
diff --git a/nixos/modules/virtualisation/lxc-container.nix b/nixos/modules/virtualisation/lxc-container.nix
index 4db4df02fe8c9..8d3a480e6dc8c 100644
--- a/nixos/modules/virtualisation/lxc-container.nix
+++ b/nixos/modules/virtualisation/lxc-container.nix
@@ -1,7 +1,9 @@
 { lib, config, pkgs, ... }:
 
 {
-  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   imports = [
     ./lxc-instance-common.nix
diff --git a/nixos/modules/virtualisation/lxc.nix b/nixos/modules/virtualisation/lxc.nix
index 5bd64a5f9a565..3febb4b4f2483 100644
--- a/nixos/modules/virtualisation/lxc.nix
+++ b/nixos/modules/virtualisation/lxc.nix
@@ -2,21 +2,19 @@
 
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
-
   cfg = config.virtualisation.lxc;
-
 in
 
 {
-  ###### interface
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   options.virtualisation.lxc = {
     enable =
-      mkOption {
-        type = types.bool;
+      lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description =
           lib.mdDoc ''
@@ -27,8 +25,8 @@ in
       };
 
     systemConfig =
-      mkOption {
-        type = types.lines;
+      lib.mkOption {
+        type = lib.types.lines;
         default = "";
         description =
           lib.mdDoc ''
@@ -38,8 +36,8 @@ in
       };
 
     defaultConfig =
-      mkOption {
-        type = types.lines;
+      lib.mkOption {
+        type = lib.types.lines;
         default = "";
         description =
           lib.mdDoc ''
@@ -49,8 +47,8 @@ in
       };
 
     usernetConfig =
-      mkOption {
-        type = types.lines;
+      lib.mkOption {
+        type = lib.types.lines;
         default = "";
         description =
           lib.mdDoc ''
@@ -62,7 +60,7 @@ in
 
   ###### implementation
 
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
     environment.systemPackages = [ pkgs.lxc ];
     environment.etc."lxc/lxc.conf".text = cfg.systemConfig;
     environment.etc."lxc/lxc-usernet".text = cfg.usernetConfig;
diff --git a/nixos/modules/virtualisation/lxcfs.nix b/nixos/modules/virtualisation/lxcfs.nix
index fb0ba49f73044..b2eaec774a65c 100644
--- a/nixos/modules/virtualisation/lxcfs.nix
+++ b/nixos/modules/virtualisation/lxcfs.nix
@@ -2,18 +2,18 @@
 
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
   cfg = config.virtualisation.lxc.lxcfs;
 in {
-  meta.maintainers = [ maintainers.mic92 ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   ###### interface
   options.virtualisation.lxc.lxcfs = {
     enable =
-      mkOption {
-        type = types.bool;
+      lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc ''
           This enables LXCFS, a FUSE filesystem for LXC.
@@ -27,7 +27,7 @@ in {
   };
 
   ###### implementation
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
     systemd.services.lxcfs = {
       description = "FUSE filesystem for LXC";
       wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/virtualisation/lxd-agent.nix b/nixos/modules/virtualisation/lxd-agent.nix
index 5bcc86e3bcbe9..59dd46b74b400 100644
--- a/nixos/modules/virtualisation/lxd-agent.nix
+++ b/nixos/modules/virtualisation/lxd-agent.nix
@@ -45,7 +45,9 @@ let
     chown -R root:root "$PREFIX"
   '';
 in {
-  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   options = {
     virtualisation.lxd.agent.enable = lib.mkEnableOption (lib.mdDoc "Enable LXD agent");
diff --git a/nixos/modules/virtualisation/lxd-virtual-machine.nix b/nixos/modules/virtualisation/lxd-virtual-machine.nix
index ba729465ec2f8..92434cb9babf4 100644
--- a/nixos/modules/virtualisation/lxd-virtual-machine.nix
+++ b/nixos/modules/virtualisation/lxd-virtual-machine.nix
@@ -6,6 +6,10 @@ let
     then "ttyS0"
     else "ttyAMA0"; # aarch64
 in {
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
+
   imports = [
     ./lxc-instance-common.nix
 
diff --git a/nixos/modules/virtualisation/lxd.nix b/nixos/modules/virtualisation/lxd.nix
index 6f628c4a6e328..c4c856d9be30d 100644
--- a/nixos/modules/virtualisation/lxd.nix
+++ b/nixos/modules/virtualisation/lxd.nix
@@ -6,12 +6,14 @@ let
   cfg = config.virtualisation.lxd;
   preseedFormat = pkgs.formats.yaml {};
 in {
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
+
   imports = [
     (lib.mkRemovedOptionModule [ "virtualisation" "lxd" "zfsPackage" ] "Override zfs in an overlay instead to override it globally")
   ];
 
-  ###### interface
-
   options = {
     virtualisation.lxd = {
       enable = lib.mkOption {